/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *	Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
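
/*
 * Illustrative usage only (not part of the driver): both module
 * parameters can be set at load time from the shell.  The values
 * shown here are examples, not recommendations.
 *
 *	modprobe qlge debug=0x4007 irq_type=2
 */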

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
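
/*
 * Usage sketch (illustrative only, mirroring the callers below): take
 * the hardware semaphore for a shared resource, touch the hardware,
 * then release it.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */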

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
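
/*
 * Illustrative call (a sketch, not code from this file): downloading a
 * hypothetical, already-populated completion queue init control block
 * "cqicb" for a given queue id would look like:
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */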

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
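
/*
 * Usage sketch (illustrative; ql_vlan_rx_add_vid() later in this file
 * does exactly this): enable a VLAN id by passing the MAC_ADDR_E bit
 * through the "addr" argument.
 *
 *	u32 enable_bit = MAC_ADDR_E;
 *	ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
 *			    MAC_ADDR_TYPE_VLAN, vid);
 */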

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		goto exit;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status;
	u32 value = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->func)
		offset = sizeof(qdev->flash) / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}

	}
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
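
/*
 * Usage sketch (illustrative only): each 64-bit XGMAC statistic spans
 * two consecutive 32-bit registers, so callers pass the offset of the
 * low word.  "some_stat_reg" is a hypothetical register offset.
 *
 *	u64 val;
 *	if (ql_read_xgmac_reg64(qdev, some_stat_reg, &val) == 0)
 *		... use val ...
 */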

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
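
/*
 * Typical completion-loop usage (illustrative; the ql_clean_* routines
 * below follow this pattern): consume entries, then write the consumer
 * index back to the doorbell once per batch.
 *
 *	while (prod != rx_ring->cnsmr_idx) {
 *		... handle rx_ring->curr_entry ...
 *		ql_update_cq(rx_ring);
 *	}
 *	ql_write_cq_idx(rx_ring);
 */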

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->lbq_clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					rx_ring->lbq_clean_idx = clean_idx;
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					rx_ring->lbq_clean_idx = clean_idx;
					put_page(lbq_desc->p.lbq_page);
					lbq_desc->p.lbq_page = NULL;
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
		rx_ring->lbq_free_cnt -= 16;
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->sbq_clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);

		rx_ring->sbq_free_cnt -= 16;
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				pci_unmap_addr(sbq_desc, mapaddr),
				pci_unmap_len(sbq_desc, maplen),
				PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames. If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		__le64 *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (__le64 *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->
			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_receive_skb(skb);
	}
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			{
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"Hit default case, not handled! dropping the packet, opcode = %x.\n",
					net_rsp->opcode);
			}
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	if (work_done < budget) {
		__netif_rx_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		QPRINTK(qdev, IFUP, DEBUG,
			"Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
}

static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);

}

/* Worker thread to process a given rx_ring that is dedicated
 * to outbound completions.
 */
static void ql_tx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);

}

/* Worker thread to process a given rx_ring that is dedicated
 * to inbound completions.
 */
static void ql_rx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	ql_clean_inbound_rx_ring(rx_ring, 64);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}
1737
1738/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1739static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1740{
1741 struct rx_ring *rx_ring = dev_id;
1742 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1743 &rx_ring->rx_work, 0);
1744 return IRQ_HANDLED;
1745}
1746
1747/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1748static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1749{
1750 struct rx_ring *rx_ring = dev_id;
1751 netif_rx_schedule(&rx_ring->napi);
1752 return IRQ_HANDLED;
1753}
1754
1755/* This handles a fatal error, MPI activity, and the default
1756 * rx_ring in an MSI-X multiple vector environment.
1757 * In an MSI/Legacy environment it also processes the rest of
1758 * the rx_rings.
1759 */
1760static irqreturn_t qlge_isr(int irq, void *dev_id)
1761{
1762 struct rx_ring *rx_ring = dev_id;
1763 struct ql_adapter *qdev = rx_ring->qdev;
1764 struct intr_context *intr_context = &qdev->intr_context[0];
1765 u32 var;
1766 int i;
1767 int work_done = 0;
1768
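 /* irq_cnt is nonzero only while this vector is disabled at the
  * chip, so an interrupt arriving then is presumed to come from
  * another device sharing the legacy line.
  */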
1769 spin_lock(&qdev->hw_lock);
1770 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1771 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1772 spin_unlock(&qdev->hw_lock);
1773 return IRQ_NONE;
1774 }
1775 spin_unlock(&qdev->hw_lock);
1776
1777 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1778
1779 /*
1780 * Check for fatal error.
1781 */
1782 if (var & STS_FE) {
1783 ql_queue_asic_error(qdev);
1784 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1785 var = ql_read32(qdev, ERR_STS);
1786 QPRINTK(qdev, INTR, ERR,
1787 "Resetting chip. Error Status Register = 0x%x\n", var);
1788 return IRQ_HANDLED;
1789 }
1790
1791 /*
1792 * Check MPI processor activity.
1793 */
1794 if (var & STS_PI) {
1795 /*
1796 * We've got an async event or mailbox completion.
1797 * Handle it and clear the source of the interrupt.
1798 */
1799 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1800 ql_disable_completion_interrupt(qdev, intr_context->intr);
1801 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1802 &qdev->mpi_work, 0);
1803 work_done++;
1804 }
1805
1806 /*
1807 * Check the default queue and wake handler if active.
1808 */
1809 rx_ring = &qdev->rx_ring[0];
1810 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1811 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1812 ql_disable_completion_interrupt(qdev, intr_context->intr);
1813 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1814 &rx_ring->rx_work, 0);
1815 work_done++;
1816 }
1817
1818 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1819 /*
1820 * Start the DPC for each active queue.
1821 */
1822 for (i = 1; i < qdev->rx_ring_count; i++) {
1823 rx_ring = &qdev->rx_ring[i];
1824 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1825 rx_ring->cnsmr_idx) {
1826 QPRINTK(qdev, INTR, INFO,
1827 "Waking handler for rx_ring[%d].\n", i);
1828 ql_disable_completion_interrupt(qdev,
1829 intr_context->
1830 intr);
1831 if (i < qdev->rss_ring_first_cq_id)
1832 queue_delayed_work_on(rx_ring->cpu,
1833 qdev->q_workqueue,
1834 &rx_ring->rx_work,
1835 0);
1836 else
1837 netif_rx_schedule(&rx_ring->napi);
1838 work_done++;
1839 }
1840 }
1841 }
1842 ql_enable_completion_interrupt(qdev, intr_context->intr);
1843 return work_done ? IRQ_HANDLED : IRQ_NONE;
1844}
1845
1846static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1847{
1848
1849 if (skb_is_gso(skb)) {
1850 int err;
1851 if (skb_header_cloned(skb)) {
1852 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1853 if (err)
1854 return err;
1855 }
1856
1857 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1858 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1859 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1860 mac_iocb_ptr->total_hdrs_len =
1861 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1862 mac_iocb_ptr->net_trans_offset =
1863 cpu_to_le16(skb_network_offset(skb) |
1864 skb_transport_offset(skb)
1865 << OB_MAC_TRANSPORT_HDR_SHIFT);
1866 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1867 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
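 /* For LSO the checksum field must be seeded with just the
  * pseudo-header sum, with the length left out, so the hardware
  * can compute the per-segment TCP checksums from there.
  */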
1868 if (likely(skb->protocol == htons(ETH_P_IP))) {
1869 struct iphdr *iph = ip_hdr(skb);
1870 iph->check = 0;
1871 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1872 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1873 iph->daddr, 0,
1874 IPPROTO_TCP,
1875 0);
1876 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1877 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1878 tcp_hdr(skb)->check =
1879 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1880 &ipv6_hdr(skb)->daddr,
1881 0, IPPROTO_TCP, 0);
1882 }
1883 return 1;
1884 }
1885 return 0;
1886}
1887
1888static void ql_hw_csum_setup(struct sk_buff *skb,
1889 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1890{
1891 int len;
1892 struct iphdr *iph = ip_hdr(skb);
1893 __sum16 *check;
1894 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1895 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1896 mac_iocb_ptr->net_trans_offset =
1897 cpu_to_le16(skb_network_offset(skb) |
1898 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1899
1900 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1901 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1902 if (likely(iph->protocol == IPPROTO_TCP)) {
1903 check = &(tcp_hdr(skb)->check);
1904 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1905 mac_iocb_ptr->total_hdrs_len =
1906 cpu_to_le16(skb_transport_offset(skb) +
1907 (tcp_hdr(skb)->doff << 2));
1908 } else {
1909 check = &(udp_hdr(skb)->check);
1910 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1911 mac_iocb_ptr->total_hdrs_len =
1912 cpu_to_le16(skb_transport_offset(skb) +
1913 sizeof(struct udphdr));
1914 }
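 /* Seed the TCP/UDP checksum field with the pseudo-header sum;
  * the chip folds in the payload bytes on transmit.
  */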
1915 *check = ~csum_tcpudp_magic(iph->saddr,
1916 iph->daddr, len, iph->protocol, 0);
1917}
1918
1919static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1920{
1921 struct tx_ring_desc *tx_ring_desc;
1922 struct ob_mac_iocb_req *mac_iocb_ptr;
1923 struct ql_adapter *qdev = netdev_priv(ndev);
1924 int tso;
1925 struct tx_ring *tx_ring;
1926 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1927
1928 tx_ring = &qdev->tx_ring[tx_ring_idx];
1929
1930 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1931 QPRINTK(qdev, TX_QUEUED, INFO,
1932 "%s: shutting down tx queue %d du to lack of resources.\n",
1933 __func__, tx_ring_idx);
1934 netif_stop_queue(ndev);
1935 atomic_inc(&tx_ring->queue_stopped);
1936 return NETDEV_TX_BUSY;
1937 }
1938 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1939 mac_iocb_ptr = tx_ring_desc->queue_entry;
1940 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
1941
1942 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1943 mac_iocb_ptr->tid = tx_ring_desc->index;
1944 /* We use the upper 32-bits to store the tx queue for this IO.
1945 * When we get the completion we can use it to establish the context.
1946 */
1947 mac_iocb_ptr->txq_idx = tx_ring_idx;
1948 tx_ring_desc->skb = skb;
1949
1950 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1951
1952 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1953 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1954 vlan_tx_tag_get(skb));
1955 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1956 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1957 }
1958 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1959 if (tso < 0) {
1960 dev_kfree_skb_any(skb);
1961 return NETDEV_TX_OK;
1962 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1963 ql_hw_csum_setup(skb,
1964 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1965 }
1966 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
1967 NETDEV_TX_OK) {
1968 QPRINTK(qdev, TX_QUEUED, ERR,
1969 "Could not map the segments.\n");
1970 return NETDEV_TX_BUSY;
1971 }
1972 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1973 tx_ring->prod_idx++;
1974 if (tx_ring->prod_idx == tx_ring->wq_len)
1975 tx_ring->prod_idx = 0;
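 /* Order the IOCB writes before the doorbell write below so the
  * chip never fetches a stale descriptor.
  */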
1976 wmb();
1977
1978 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1979 ndev->trans_start = jiffies;
1980 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1981 tx_ring->prod_idx, skb->len);
1982
1983 atomic_dec(&tx_ring->tx_count);
1984 return NETDEV_TX_OK;
1985}
1986
1987static void ql_free_shadow_space(struct ql_adapter *qdev)
1988{
1989 if (qdev->rx_ring_shadow_reg_area) {
1990 pci_free_consistent(qdev->pdev,
1991 PAGE_SIZE,
1992 qdev->rx_ring_shadow_reg_area,
1993 qdev->rx_ring_shadow_reg_dma);
1994 qdev->rx_ring_shadow_reg_area = NULL;
1995 }
1996 if (qdev->tx_ring_shadow_reg_area) {
1997 pci_free_consistent(qdev->pdev,
1998 PAGE_SIZE,
1999 qdev->tx_ring_shadow_reg_area,
2000 qdev->tx_ring_shadow_reg_dma);
2001 qdev->tx_ring_shadow_reg_area = NULL;
2002 }
2003}
2004
2005static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2006{
2007 qdev->rx_ring_shadow_reg_area =
2008 pci_alloc_consistent(qdev->pdev,
2009 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2010 if (qdev->rx_ring_shadow_reg_area == NULL) {
2011 QPRINTK(qdev, IFUP, ERR,
2012 "Allocation of RX shadow space failed.\n");
2013 return -ENOMEM;
2014 }
2015 qdev->tx_ring_shadow_reg_area =
2016 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2017 &qdev->tx_ring_shadow_reg_dma);
2018 if (qdev->tx_ring_shadow_reg_area == NULL) {
2019 QPRINTK(qdev, IFUP, ERR,
2020 "Allocation of TX shadow space failed.\n");
2021 goto err_wqp_sh_area;
2022 }
2023 return 0;
2024
2025err_wqp_sh_area:
2026 pci_free_consistent(qdev->pdev,
2027 PAGE_SIZE,
2028 qdev->rx_ring_shadow_reg_area,
2029 qdev->rx_ring_shadow_reg_dma);
2030 return -ENOMEM;
2031}
2032
2033static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2034{
2035 struct tx_ring_desc *tx_ring_desc;
2036 int i;
2037 struct ob_mac_iocb_req *mac_iocb_ptr;
2038
2039 mac_iocb_ptr = tx_ring->wq_base;
2040 tx_ring_desc = tx_ring->q;
2041 for (i = 0; i < tx_ring->wq_len; i++) {
2042 tx_ring_desc->index = i;
2043 tx_ring_desc->skb = NULL;
2044 tx_ring_desc->queue_entry = mac_iocb_ptr;
2045 mac_iocb_ptr++;
2046 tx_ring_desc++;
2047 }
2048 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2049 atomic_set(&tx_ring->queue_stopped, 0);
2050}
2051
2052static void ql_free_tx_resources(struct ql_adapter *qdev,
2053 struct tx_ring *tx_ring)
2054{
2055 if (tx_ring->wq_base) {
2056 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2057 tx_ring->wq_base, tx_ring->wq_base_dma);
2058 tx_ring->wq_base = NULL;
2059 }
2060 kfree(tx_ring->q);
2061 tx_ring->q = NULL;
2062}
2063
2064static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2065 struct tx_ring *tx_ring)
2066{
2067 tx_ring->wq_base =
2068 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2069 &tx_ring->wq_base_dma);
2070
2071 if ((tx_ring->wq_base == NULL)
2072 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2073 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2074 return -ENOMEM;
2075 }
2076 tx_ring->q =
2077 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2078 if (tx_ring->q == NULL)
2079 goto err;
2080
2081 return 0;
2082err:
2083 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2084 tx_ring->wq_base, tx_ring->wq_base_dma);
2085 return -ENOMEM;
2086}
2087
2088static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2089{
2090 int i;
2091 struct bq_desc *lbq_desc;
2092
2093 for (i = 0; i < rx_ring->lbq_len; i++) {
2094 lbq_desc = &rx_ring->lbq[i];
2095 if (lbq_desc->p.lbq_page) {
2096 pci_unmap_page(qdev->pdev,
2097 pci_unmap_addr(lbq_desc, mapaddr),
2098 pci_unmap_len(lbq_desc, maplen),
2099 PCI_DMA_FROMDEVICE);
2100
2101 put_page(lbq_desc->p.lbq_page);
2102 lbq_desc->p.lbq_page = NULL;
2103 }
2104 }
2105}
2106
2107/*
2108 * Allocate and map a page for each element of the lbq.
2109 */
2110static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2111 struct rx_ring *rx_ring)
2112{
2113 int i;
2114 struct bq_desc *lbq_desc;
2115 u64 map;
2116 __le64 *bq = rx_ring->lbq_base;
2117
2118 for (i = 0; i < rx_ring->lbq_len; i++) {
2119 lbq_desc = &rx_ring->lbq[i];
2120 memset(lbq_desc, 0, sizeof(*lbq_desc));
2121 lbq_desc->addr = bq;
2122 lbq_desc->index = i;
2123 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2124 if (unlikely(!lbq_desc->p.lbq_page)) {
2125 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2126 goto mem_error;
2127 } else {
2128 map = pci_map_page(qdev->pdev,
2129 lbq_desc->p.lbq_page,
2130 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2131 if (pci_dma_mapping_error(qdev->pdev, map)) {
2132 QPRINTK(qdev, IFUP, ERR,
2133 "PCI mapping failed.\n");
2134 goto mem_error;
2135 }
2136 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2137 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2138 *lbq_desc->addr = cpu_to_le64(map);
2139 }
2140 bq++;
2141 }
2142 return 0;
2143mem_error:
2144 ql_free_lbq_buffers(qdev, rx_ring);
2145 return -ENOMEM;
2146}
2147
2148static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2149{
2150 int i;
2151 struct bq_desc *sbq_desc;
2152
2153 for (i = 0; i < rx_ring->sbq_len; i++) {
2154 sbq_desc = &rx_ring->sbq[i];
2155 if (sbq_desc == NULL) {
2156 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2157 return;
2158 }
2159 if (sbq_desc->p.skb) {
2160 pci_unmap_single(qdev->pdev,
2161 pci_unmap_addr(sbq_desc, mapaddr),
2162 pci_unmap_len(sbq_desc, maplen),
2163 PCI_DMA_FROMDEVICE);
2164 dev_kfree_skb(sbq_desc->p.skb);
2165 sbq_desc->p.skb = NULL;
2166 }
2167 }
2168}
2169
2170/* Allocate and map an skb for each element of the sbq. */
2171static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2172 struct rx_ring *rx_ring)
2173{
2174 int i;
2175 struct bq_desc *sbq_desc;
2176 struct sk_buff *skb;
2177 u64 map;
2178 __le64 *bq = rx_ring->sbq_base;
2179
2180 for (i = 0; i < rx_ring->sbq_len; i++) {
2181 sbq_desc = &rx_ring->sbq[i];
2182 memset(sbq_desc, 0, sizeof(*sbq_desc));
2183 sbq_desc->index = i;
2184 sbq_desc->addr = bq;
2185 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2186 if (unlikely(!skb)) {
2187 /* Better luck next round */
2188 QPRINTK(qdev, IFUP, ERR,
2189 "small buff alloc failed for %d bytes at index %d.\n",
2190 rx_ring->sbq_buf_size, i);
2191 goto mem_err;
2192 }
2193 skb_reserve(skb, QLGE_SB_PAD);
2194 sbq_desc->p.skb = skb;
2195 /*
2196 * Map only half the buffer, because the
2197 * other half may get data copied into it
2198 * when the completion arrives.
2199 */
2200 map = pci_map_single(qdev->pdev,
2201 skb->data,
2202 rx_ring->sbq_buf_size / 2,
2203 PCI_DMA_FROMDEVICE);
2204 if (pci_dma_mapping_error(qdev->pdev, map)) {
2205 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2206 goto mem_err;
2207 }
2208 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2209 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2210 *sbq_desc->addr = cpu_to_le64(map);
2211 bq++;
2212 }
2213 return 0;
2214mem_err:
2215 ql_free_sbq_buffers(qdev, rx_ring);
2216 return -ENOMEM;
2217}
2218
2219static void ql_free_rx_resources(struct ql_adapter *qdev,
2220 struct rx_ring *rx_ring)
2221{
2222 if (rx_ring->sbq_len)
2223 ql_free_sbq_buffers(qdev, rx_ring);
2224 if (rx_ring->lbq_len)
2225 ql_free_lbq_buffers(qdev, rx_ring);
2226
2227 /* Free the small buffer queue. */
2228 if (rx_ring->sbq_base) {
2229 pci_free_consistent(qdev->pdev,
2230 rx_ring->sbq_size,
2231 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2232 rx_ring->sbq_base = NULL;
2233 }
2234
2235 /* Free the small buffer queue control blocks. */
2236 kfree(rx_ring->sbq);
2237 rx_ring->sbq = NULL;
2238
2239 /* Free the large buffer queue. */
2240 if (rx_ring->lbq_base) {
2241 pci_free_consistent(qdev->pdev,
2242 rx_ring->lbq_size,
2243 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2244 rx_ring->lbq_base = NULL;
2245 }
2246
2247 /* Free the large buffer queue control blocks. */
2248 kfree(rx_ring->lbq);
2249 rx_ring->lbq = NULL;
2250
2251 /* Free the rx queue. */
2252 if (rx_ring->cq_base) {
2253 pci_free_consistent(qdev->pdev,
2254 rx_ring->cq_size,
2255 rx_ring->cq_base, rx_ring->cq_base_dma);
2256 rx_ring->cq_base = NULL;
2257 }
2258}
2259
2260/* Allocate queues and buffers for this completion queue based
2261 * on the values in the parameter structure. */
2262static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2263 struct rx_ring *rx_ring)
2264{
2265
2266 /*
2267 * Allocate the completion queue for this rx_ring.
2268 */
2269 rx_ring->cq_base =
2270 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2271 &rx_ring->cq_base_dma);
2272
2273 if (rx_ring->cq_base == NULL) {
2274 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2275 return -ENOMEM;
2276 }
2277
2278 if (rx_ring->sbq_len) {
2279 /*
2280 * Allocate small buffer queue.
2281 */
2282 rx_ring->sbq_base =
2283 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2284 &rx_ring->sbq_base_dma);
2285
2286 if (rx_ring->sbq_base == NULL) {
2287 QPRINTK(qdev, IFUP, ERR,
2288 "Small buffer queue allocation failed.\n");
2289 goto err_mem;
2290 }
2291
2292 /*
2293 * Allocate small buffer queue control blocks.
2294 */
2295 rx_ring->sbq =
2296 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2297 GFP_KERNEL);
2298 if (rx_ring->sbq == NULL) {
2299 QPRINTK(qdev, IFUP, ERR,
2300 "Small buffer queue control block allocation failed.\n");
2301 goto err_mem;
2302 }
2303
2304 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2305 QPRINTK(qdev, IFUP, ERR,
2306 "Small buffer allocation failed.\n");
2307 goto err_mem;
2308 }
2309 }
2310
2311 if (rx_ring->lbq_len) {
2312 /*
2313 * Allocate large buffer queue.
2314 */
2315 rx_ring->lbq_base =
2316 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2317 &rx_ring->lbq_base_dma);
2318
2319 if (rx_ring->lbq_base == NULL) {
2320 QPRINTK(qdev, IFUP, ERR,
2321 "Large buffer queue allocation failed.\n");
2322 goto err_mem;
2323 }
2324 /*
2325 * Allocate large buffer queue control blocks.
2326 */
2327 rx_ring->lbq =
2328 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2329 GFP_KERNEL);
2330 if (rx_ring->lbq == NULL) {
2331 QPRINTK(qdev, IFUP, ERR,
2332 "Large buffer queue control block allocation failed.\n");
2333 goto err_mem;
2334 }
2335
2336 /*
2337 * Allocate the buffers.
2338 */
2339 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2340 QPRINTK(qdev, IFUP, ERR,
2341 "Large buffer allocation failed.\n");
2342 goto err_mem;
2343 }
2344 }
2345
2346 return 0;
2347
2348err_mem:
2349 ql_free_rx_resources(qdev, rx_ring);
2350 return -ENOMEM;
2351}
2352
2353static void ql_tx_ring_clean(struct ql_adapter *qdev)
2354{
2355 struct tx_ring *tx_ring;
2356 struct tx_ring_desc *tx_ring_desc;
2357 int i, j;
2358
2359 /*
2360 * Loop through all queues and free
2361 * any resources.
2362 */
2363 for (j = 0; j < qdev->tx_ring_count; j++) {
2364 tx_ring = &qdev->tx_ring[j];
2365 for (i = 0; i < tx_ring->wq_len; i++) {
2366 tx_ring_desc = &tx_ring->q[i];
2367 if (tx_ring_desc && tx_ring_desc->skb) {
2368 QPRINTK(qdev, IFDOWN, ERR,
2369 "Freeing lost SKB %p, from queue %d, index %d.\n",
2370 tx_ring_desc->skb, j,
2371 tx_ring_desc->index);
2372 ql_unmap_send(qdev, tx_ring_desc,
2373 tx_ring_desc->map_cnt);
2374 dev_kfree_skb(tx_ring_desc->skb);
2375 tx_ring_desc->skb = NULL;
2376 }
2377 }
2378 }
2379}
2380
2381static void ql_free_mem_resources(struct ql_adapter *qdev)
2382{
2383 int i;
2384
2385 for (i = 0; i < qdev->tx_ring_count; i++)
2386 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2387 for (i = 0; i < qdev->rx_ring_count; i++)
2388 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2389 ql_free_shadow_space(qdev);
2390}
2391
2392static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2393{
2394 int i;
2395
2396 /* Allocate space for our shadow registers and such. */
2397 if (ql_alloc_shadow_space(qdev))
2398 return -ENOMEM;
2399
2400 for (i = 0; i < qdev->rx_ring_count; i++) {
2401 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2402 QPRINTK(qdev, IFUP, ERR,
2403 "RX resource allocation failed.\n");
2404 goto err_mem;
2405 }
2406 }
2407 /* Allocate tx queue resources */
2408 for (i = 0; i < qdev->tx_ring_count; i++) {
2409 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2410 QPRINTK(qdev, IFUP, ERR,
2411 "TX resource allocation failed.\n");
2412 goto err_mem;
2413 }
2414 }
2415 return 0;
2416
2417err_mem:
2418 ql_free_mem_resources(qdev);
2419 return -ENOMEM;
2420}
2421
2422/* Set up the rx ring control block and pass it to the chip.
2423 * The control block is defined as
2424 * "Completion Queue Initialization Control Block", or cqicb.
2425 */
2426static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2427{
2428 struct cqicb *cqicb = &rx_ring->cqicb;
2429 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2430 (rx_ring->cq_id * sizeof(u64) * 4);
2431 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2432 (rx_ring->cq_id * sizeof(u64) * 4);
2433 void __iomem *doorbell_area =
2434 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2435 int err = 0;
2436 u16 bq_len;
2437
2438 /* Set up the shadow registers for this ring. */
2439 rx_ring->prod_idx_sh_reg = shadow_reg;
2440 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2441 shadow_reg += sizeof(u64);
2442 shadow_reg_dma += sizeof(u64);
2443 rx_ring->lbq_base_indirect = shadow_reg;
2444 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2445 shadow_reg += sizeof(u64);
2446 shadow_reg_dma += sizeof(u64);
2447 rx_ring->sbq_base_indirect = shadow_reg;
2448 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2449
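 /* Each ring thus uses three of the four u64 shadow slots carved
  * out above (producer index, lbq indirect base, sbq indirect
  * base); the fourth appears to be spare.
  */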
2450 /* PCI doorbell mem area + 0x00 for consumer index register */
2451 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2452 rx_ring->cnsmr_idx = 0;
2453 rx_ring->curr_entry = rx_ring->cq_base;
2454
2455 /* PCI doorbell mem area + 0x04 for valid register */
2456 rx_ring->valid_db_reg = doorbell_area + 0x04;
2457
2458 /* PCI doorbell mem area + 0x18 for large buffer producer index */
2459 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2460
2461 /* PCI doorbell mem area + 0x1c for small buffer producer index */
2462 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2463
2464 memset((void *)cqicb, 0, sizeof(struct cqicb));
2465 cqicb->msix_vect = rx_ring->irq;
2466
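 /* The queue length fields are 16 bits wide, so a full
  * 65536-entry queue is encoded as 0; the lbq/sbq lengths below
  * use the same convention.
  */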
2467 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2468 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2469
2470 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2471
2472 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2473
2474 /*
2475 * Set up the control block load flags.
2476 */
2477 cqicb->flags = FLAGS_LC | /* Load queue base address */
2478 FLAGS_LV | /* Load MSI-X vector */
2479 FLAGS_LI; /* Load irq delay values */
2480 if (rx_ring->lbq_len) {
2481 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2482 *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(rx_ring->lbq_base_dma);
2483 cqicb->lbq_addr =
2484 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2485 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2486 (u16) rx_ring->lbq_buf_size;
2487 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2488 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2489 (u16) rx_ring->lbq_len;
2490 cqicb->lbq_len = cpu_to_le16(bq_len);
2491 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2492 rx_ring->lbq_curr_idx = 0;
2493 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2494 rx_ring->lbq_free_cnt = 16;
2495 }
2496 if (rx_ring->sbq_len) {
2497 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2498 *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(rx_ring->sbq_base_dma);
2499 cqicb->sbq_addr =
2500 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2501 cqicb->sbq_buf_size =
2502 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
2503 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2504 (u16) rx_ring->sbq_len;
2505 cqicb->sbq_len = cpu_to_le16(bq_len);
2506 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2507 rx_ring->sbq_curr_idx = 0;
2508 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2509 rx_ring->sbq_free_cnt = 16;
2510 }
2511 switch (rx_ring->type) {
2512 case TX_Q:
2513 /* If there's only one interrupt, then we use
2514 * worker threads to process the outbound
2515 * completion handling rx_rings. We do this so
2516 * they can be run on multiple CPUs. There is
2517 * room to play with this more where we would only
2518 * run in a worker if there are more than x number
2519 * of outbound completions on the queue and more
2520 * than one queue active. Some threshold that
2521 * would indicate a benefit in spite of the cost
2522 * of a context switch.
2523 * If there's more than one interrupt, then the
2524 * outbound completions are processed in the ISR.
2525 */
2526 /* Outbound completions are always handled by the ql_tx_clean
2527 * worker here; freeing the skb directly in interrupt context
2528 * triggers a WARN_ON message with all debug warnings on.
2529 */
2530 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2534 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2535 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2536 break;
2537 case DEFAULT_Q:
2538 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2539 cqicb->irq_delay = 0;
2540 cqicb->pkt_delay = 0;
2541 break;
2542 case RX_Q:
2543 /* Inbound completion handling rx_rings run in
2544 * separate NAPI contexts.
2545 */
2546 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2547 64);
2548 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2549 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2550 break;
2551 default:
2552 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2553 rx_ring->type);
2554 }
2555 QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2556 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2557 CFG_LCQ, rx_ring->cq_id);
2558 if (err) {
2559 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2560 return err;
2561 }
2562 QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2563 /*
2564 * Advance the producer index for the buffer queues.
2565 */
2566 wmb();
2567 if (rx_ring->lbq_len)
2568 ql_write_db_reg(rx_ring->lbq_prod_idx,
2569 rx_ring->lbq_prod_idx_db_reg);
2570 if (rx_ring->sbq_len)
2571 ql_write_db_reg(rx_ring->sbq_prod_idx,
2572 rx_ring->sbq_prod_idx_db_reg);
2573 return err;
2574}
2575
2576static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2577{
2578 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2579 void __iomem *doorbell_area =
2580 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2581 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2582 (tx_ring->wq_id * sizeof(u64));
2583 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2584 (tx_ring->wq_id * sizeof(u64));
2585 int err = 0;
2586
2587 /*
2588 * Assign doorbell registers for this tx_ring.
2589 */
2590 /* TX PCI doorbell mem area for tx producer index */
2591 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2592 tx_ring->prod_idx = 0;
2593 /* TX PCI doorbell mem area + 0x04 */
2594 tx_ring->valid_db_reg = doorbell_area + 0x04;
2595
2596 /*
2597 * Assign shadow registers for this tx_ring.
2598 */
2599 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2600 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2601
2602 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2603 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2604 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2605 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2606 wqicb->rid = 0;
2607 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2608
2609 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2610
2611 ql_init_tx_ring(qdev, tx_ring);
2612
2613 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2614 (u16) tx_ring->wq_id);
2615 if (err) {
2616 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2617 return err;
2618 }
2619 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2620 return err;
2621}
2622
2623static void ql_disable_msix(struct ql_adapter *qdev)
2624{
2625 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2626 pci_disable_msix(qdev->pdev);
2627 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2628 kfree(qdev->msi_x_entry);
2629 qdev->msi_x_entry = NULL;
2630 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2631 pci_disable_msi(qdev->pdev);
2632 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2633 }
2634}
2635
2636static void ql_enable_msix(struct ql_adapter *qdev)
2637{
2638 int i;
2639
2640 qdev->intr_count = 1;
2641 /* Get the MSIX vectors. */
2642 if (irq_type == MSIX_IRQ) {
2643 /* Try to alloc space for the msix struct,
2644 * if it fails then go to MSI/legacy.
2645 */
2646 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2647 sizeof(struct msix_entry),
2648 GFP_KERNEL);
2649 if (!qdev->msi_x_entry) {
2650 irq_type = MSI_IRQ;
2651 goto msi;
2652 }
2653
2654 for (i = 0; i < qdev->rx_ring_count; i++)
2655 qdev->msi_x_entry[i].entry = i;
2656
2657 if (!pci_enable_msix
2658 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2659 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2660 qdev->intr_count = qdev->rx_ring_count;
2661 QPRINTK(qdev, IFUP, INFO,
2662 "MSI-X Enabled, got %d vectors.\n",
2663 qdev->intr_count);
2664 return;
2665 } else {
2666 kfree(qdev->msi_x_entry);
2667 qdev->msi_x_entry = NULL;
2668 QPRINTK(qdev, IFUP, WARNING,
2669 "MSI-X Enable failed, trying MSI.\n");
2670 irq_type = MSI_IRQ;
2671 }
2672 }
2673msi:
2674 if (irq_type == MSI_IRQ) {
2675 if (!pci_enable_msi(qdev->pdev)) {
2676 set_bit(QL_MSI_ENABLED, &qdev->flags);
2677 QPRINTK(qdev, IFUP, INFO,
2678 "Running with MSI interrupts.\n");
2679 return;
2680 }
2681 }
2682 irq_type = LEG_IRQ;
2683 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2684}
2685
2686/*
2687 * Here we build the intr_context structures based on
2688 * our rx_ring count and intr vector count.
2689 * The intr_context structure is used to hook each vector
2690 * to possibly different handlers.
2691 */
2692static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2693{
2694 int i = 0;
2695 struct intr_context *intr_context = &qdev->intr_context[0];
2696
2697 ql_enable_msix(qdev);
2698
2699 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2700 /* Each rx_ring has its
2701 * own intr_context since we have separate
2702 * vectors for each queue.
2703 * This is only true when MSI-X is enabled.
2704 */
2705 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2706 qdev->rx_ring[i].irq = i;
2707 intr_context->intr = i;
2708 intr_context->qdev = qdev;
2709 /*
2710 * We set up each vector's enable/disable/read bits so
2711 * there are no bit/mask calculations in the critical path.
2712 */
2713 intr_context->intr_en_mask =
2714 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2715 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2716 | i;
2717 intr_context->intr_dis_mask =
2718 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2719 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2720 INTR_EN_IHD | i;
2721 intr_context->intr_read_mask =
2722 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2723 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2724 i;
2725
2726 if (i == 0) {
2727 /*
2728 * Default queue handles bcast/mcast plus
2729 * async events. Needs buffers.
2730 */
2731 intr_context->handler = qlge_isr;
2732 sprintf(intr_context->name, "%s-default-queue",
2733 qdev->ndev->name);
2734 } else if (i < qdev->rss_ring_first_cq_id) {
2735 /*
2736 * Outbound queue is for outbound completions only.
2737 */
2738 intr_context->handler = qlge_msix_tx_isr;
2739 sprintf(intr_context->name, "%s-tx-%d",
2740 qdev->ndev->name, i);
2741 } else {
2742 /*
2743 * Inbound queues handle unicast frames only.
2744 */
2745 intr_context->handler = qlge_msix_rx_isr;
2746 sprintf(intr_context->name, "%s-rx-%d",
2747 qdev->ndev->name, i);
2748 }
2749 }
2750 } else {
2751 /*
2752 * All rx_rings use the same intr_context since
2753 * there is only one vector.
2754 */
2755 intr_context->intr = 0;
2756 intr_context->qdev = qdev;
2757 /*
2758 * We set up each vector's enable/disable/read bits so
2759 * there are no bit/mask calculations in the critical path.
2760 */
2761 intr_context->intr_en_mask =
2762 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2763 intr_context->intr_dis_mask =
2764 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2765 INTR_EN_TYPE_DISABLE;
2766 intr_context->intr_read_mask =
2767 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2768 /*
2769 * Single interrupt means one handler for all rings.
2770 */
2771 intr_context->handler = qlge_isr;
2772 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2773 for (i = 0; i < qdev->rx_ring_count; i++)
2774 qdev->rx_ring[i].irq = 0;
2775 }
2776}
2777
2778static void ql_free_irq(struct ql_adapter *qdev)
2779{
2780 int i;
2781 struct intr_context *intr_context = &qdev->intr_context[0];
2782
2783 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2784 if (intr_context->hooked) {
2785 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2786 free_irq(qdev->msi_x_entry[i].vector,
2787 &qdev->rx_ring[i]);
2788 QPRINTK(qdev, IFDOWN, ERR,
2789 "freeing msix interrupt %d.\n", i);
2790 } else {
2791 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2792 QPRINTK(qdev, IFDOWN, ERR,
2793 "freeing msi interrupt %d.\n", i);
2794 }
2795 }
2796 }
2797 ql_disable_msix(qdev);
2798}
2799
2800static int ql_request_irq(struct ql_adapter *qdev)
2801{
2802 int i;
2803 int status = 0;
2804 struct pci_dev *pdev = qdev->pdev;
2805 struct intr_context *intr_context = &qdev->intr_context[0];
2806
2807 ql_resolve_queues_to_irqs(qdev);
2808
2809 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2810 atomic_set(&intr_context->irq_cnt, 0);
2811 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2812 status = request_irq(qdev->msi_x_entry[i].vector,
2813 intr_context->handler,
2814 0,
2815 intr_context->name,
2816 &qdev->rx_ring[i]);
2817 if (status) {
2818 QPRINTK(qdev, IFUP, ERR,
2819 "Failed request for MSIX interrupt %d.\n",
2820 i);
2821 goto err_irq;
2822 } else {
2823 QPRINTK(qdev, IFUP, INFO,
2824 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2825 i,
2826 qdev->rx_ring[i].type ==
2827 DEFAULT_Q ? "DEFAULT_Q" : "",
2828 qdev->rx_ring[i].type ==
2829 TX_Q ? "TX_Q" : "",
2830 qdev->rx_ring[i].type ==
2831 RX_Q ? "RX_Q" : "", intr_context->name);
2832 }
2833 } else {
2834 QPRINTK(qdev, IFUP, DEBUG,
2835 "trying msi or legacy interrupts.\n");
2836 QPRINTK(qdev, IFUP, DEBUG,
2837 "%s: irq = %d.\n", __func__, pdev->irq);
2838 QPRINTK(qdev, IFUP, DEBUG,
2839 "%s: context->name = %s.\n", __func__,
2840 intr_context->name);
2841 QPRINTK(qdev, IFUP, DEBUG,
2842 "%s: dev_id = 0x%p.\n", __func__,
2843 &qdev->rx_ring[0]);
2844 status =
2845 request_irq(pdev->irq, qlge_isr,
2846 test_bit(QL_MSI_ENABLED,
2847 &qdev->
2848 flags) ? 0 : IRQF_SHARED,
2849 intr_context->name, &qdev->rx_ring[0]);
2850 if (status)
2851 goto err_irq;
2852
2853 QPRINTK(qdev, IFUP, ERR,
2854 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2855 i,
2856 qdev->rx_ring[0].type ==
2857 DEFAULT_Q ? "DEFAULT_Q" : "",
2858 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2859 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2860 intr_context->name);
2861 }
2862 intr_context->hooked = 1;
2863 }
2864 return status;
2865err_irq:
2866 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
2867 ql_free_irq(qdev);
2868 return status;
2869}
2870
2871static int ql_start_rss(struct ql_adapter *qdev)
2872{
2873 struct ricb *ricb = &qdev->ricb;
2874 int status = 0;
2875 int i;
2876 u8 *hash_id = (u8 *) ricb->hash_cq_id;
2877
2878 memset((void *)ricb, 0, sizeof(*ricb));
2879
2880 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2881 ricb->flags =
2882 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2883 RSS_RT6);
2884 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2885
2886 /*
2887 * Fill out the Indirection Table.
2888 */
2889 for (i = 0; i < 256; i++)
2890 hash_id[i] = i & (qdev->rss_ring_count - 1);
2891
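 /* Each of the 256 indirection entries maps a hash bucket to an
  * inbound CQ; the mask spreads buckets evenly only when
  * rss_ring_count is a power of two.
  */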
2892 /*
2893 * Random values for the IPv6 and IPv4 Hash Keys.
2894 */
2895 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2896 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2897
2898 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2899
2900 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
2901 if (status) {
2902 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2903 return status;
2904 }
2905 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2906 return status;
2907}
2908
2909/* Initialize the frame-to-queue routing. */
2910static int ql_route_initialize(struct ql_adapter *qdev)
2911{
2912 int status = 0;
2913 int i;
2914
2915 /* Clear all the entries in the routing table. */
2916 for (i = 0; i < 16; i++) {
2917 status = ql_set_routing_reg(qdev, i, 0, 0);
2918 if (status) {
2919 QPRINTK(qdev, IFUP, ERR,
2920 "Failed to init routing register for CAM packets.\n");
2921 return status;
2922 }
2923 }
2924
2925 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2926 if (status) {
2927 QPRINTK(qdev, IFUP, ERR,
2928 "Failed to init routing register for error packets.\n");
2929 return status;
2930 }
2931 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2932 if (status) {
2933 QPRINTK(qdev, IFUP, ERR,
2934 "Failed to init routing register for broadcast packets.\n");
2935 return status;
2936 }
2937 /* If we have more than one inbound queue, then turn on RSS in the
2938 * routing block.
2939 */
2940 if (qdev->rss_ring_count > 1) {
2941 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2942 RT_IDX_RSS_MATCH, 1);
2943 if (status) {
2944 QPRINTK(qdev, IFUP, ERR,
2945 "Failed to init routing register for MATCH RSS packets.\n");
2946 return status;
2947 }
2948 }
2949
2950 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2951 RT_IDX_CAM_HIT, 1);
2952 if (status) {
2953 QPRINTK(qdev, IFUP, ERR,
2954 "Failed to init routing register for CAM packets.\n");
2955 return status;
2956 }
2957 return status;
2958}
2959
2960static int ql_adapter_initialize(struct ql_adapter *qdev)
2961{
2962 u32 value, mask;
2963 int i;
2964 int status = 0;
2965
2966 /*
2967 * Set up the System register to halt on errors.
2968 */
2969 value = SYS_EFE | SYS_FAE;
2970 mask = value << 16;
2971 ql_write32(qdev, SYS, mask | value);
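 /* Register convention assumed throughout this function: the
  * upper 16 bits written appear to act as a per-bit write enable
  * for the lower 16 data bits.
  */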
2972
2973 /* Set the default queue. */
2974 value = NIC_RCV_CFG_DFQ;
2975 mask = NIC_RCV_CFG_DFQ_MASK;
2976 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
2977
2978 /* Set the MPI interrupt to enabled. */
2979 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
2980
2981 /* Enable the function, set pagesize, enable error checking. */
2982 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
2983 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
2984
2985 /* Set/clear header splitting. */
2986 mask = FSC_VM_PAGESIZE_MASK |
2987 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
2988 ql_write32(qdev, FSC, mask | value);
2989
2990 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
2991 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
2992
2993 /* Start up the rx queues. */
2994 for (i = 0; i < qdev->rx_ring_count; i++) {
2995 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
2996 if (status) {
2997 QPRINTK(qdev, IFUP, ERR,
2998 "Failed to start rx ring[%d].\n", i);
2999 return status;
3000 }
3001 }
3002
3003 /* If there is more than one inbound completion queue
3004 * then download a RICB to configure RSS.
3005 */
3006 if (qdev->rss_ring_count > 1) {
3007 status = ql_start_rss(qdev);
3008 if (status) {
3009 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3010 return status;
3011 }
3012 }
3013
3014 /* Start up the tx queues. */
3015 for (i = 0; i < qdev->tx_ring_count; i++) {
3016 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3017 if (status) {
3018 QPRINTK(qdev, IFUP, ERR,
3019 "Failed to start tx ring[%d].\n", i);
3020 return status;
3021 }
3022 }
3023
3024 status = ql_port_initialize(qdev);
3025 if (status) {
3026 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3027 return status;
3028 }
3029
3030 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
3031 MAC_ADDR_TYPE_CAM_MAC, qdev->func);
3032 if (status) {
3033 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3034 return status;
3035 }
3036
3037 status = ql_route_initialize(qdev);
3038 if (status) {
3039 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3040 return status;
3041 }
3042
3043 /* Start NAPI for the RSS queues. */
3044 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3045 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3046 i);
3047 napi_enable(&qdev->rx_ring[i].napi);
3048 }
3049
3050 return status;
3051}
3052
3053/* Issue soft reset to chip. */
3054static int ql_adapter_reset(struct ql_adapter *qdev)
3055{
3056 u32 value;
3057 int max_wait_time;
3058 int status = 0;
3059 int resetCnt = 0;
3060
3061#define MAX_RESET_CNT 1
3062issueReset:
3063 resetCnt++;
3064 QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3065 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3066 /* Wait for reset to complete. */
3067 max_wait_time = 3;
3068 QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3069 max_wait_time);
3070 do {
3071 value = ql_read32(qdev, RST_FO);
3072 if ((value & RST_FO_FR) == 0)
3073 break;
3074
3075 ssleep(1);
3076 } while ((--max_wait_time));
3077 if (value & RST_FO_FR) {
3078 QPRINTK(qdev, IFDOWN, ERR,
3079 "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
3080 if (resetCnt < MAX_RESET_CNT)
3081 goto issueReset;
3082 }
3083 if (max_wait_time == 0) {
3084 status = -ETIMEDOUT;
3085 QPRINTK(qdev, IFDOWN, ERR,
3086 "ETIMEOUT!!! errored out of resetting the chip!\n");
3087 }
3088
3089 return status;
3090}
3091
3092static void ql_display_dev_info(struct net_device *ndev)
3093{
3094 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3095
3096 QPRINTK(qdev, PROBE, INFO,
3097 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3098 "XG Roll = %d, XG Rev = %d.\n",
3099 qdev->func,
3100 qdev->chip_rev_id & 0x0000000f,
3101 qdev->chip_rev_id >> 4 & 0x0000000f,
3102 qdev->chip_rev_id >> 8 & 0x0000000f,
3103 qdev->chip_rev_id >> 12 & 0x0000000f);
3104 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3105}
3106
3107static int ql_adapter_down(struct ql_adapter *qdev)
3108{
3109 struct net_device *ndev = qdev->ndev;
3110 int i, status = 0;
3111 struct rx_ring *rx_ring;
3112
3113 netif_stop_queue(ndev);
3114 netif_carrier_off(ndev);
3115
3116 /* Don't kill the reset worker thread if we
3117 * are in the process of recovery.
3118 */
3119 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3120 cancel_delayed_work_sync(&qdev->asic_reset_work);
3121 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3122 cancel_delayed_work_sync(&qdev->mpi_work);
3123
3124 /* The default queue at index 0 is always processed in
3125 * a workqueue.
3126 */
3127 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3128
3129 /* The rest of the rx_rings are processed in
3130 * a workqueue only if it's a single interrupt
3131 * environment (MSI/Legacy).
3132 */
3133 for (i = 1; i < qdev->rx_ring_count; i++) {
3134 rx_ring = &qdev->rx_ring[i];
3135 /* Only the RSS rings use NAPI on multi irq
3136 * environment. Outbound completion processing
3137 * is done in interrupt context.
3138 */
3139 if (i >= qdev->rss_ring_first_cq_id) {
3140 napi_disable(&rx_ring->napi);
3141 } else {
3142 cancel_delayed_work_sync(&rx_ring->rx_work);
3143 }
3144 }
3145
3146 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3147
3148 ql_disable_interrupts(qdev);
3149
3150 ql_tx_ring_clean(qdev);
3151
3152 spin_lock(&qdev->hw_lock);
3153 status = ql_adapter_reset(qdev);
3154 if (status)
3155 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3156 qdev->func);
3157 spin_unlock(&qdev->hw_lock);
3158 return status;
3159}
3160
3161static int ql_adapter_up(struct ql_adapter *qdev)
3162{
3163 int err = 0;
3164
3165 spin_lock(&qdev->hw_lock);
3166 err = ql_adapter_initialize(qdev);
3167 if (err) {
3168 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3169 spin_unlock(&qdev->hw_lock);
3170 goto err_init;
3171 }
3172 spin_unlock(&qdev->hw_lock);
3173 set_bit(QL_ADAPTER_UP, &qdev->flags);
3174 ql_enable_interrupts(qdev);
3175 ql_enable_all_completion_interrupts(qdev);
3176 if ((ql_read32(qdev, STS) & qdev->port_init)) {
3177 netif_carrier_on(qdev->ndev);
3178 netif_start_queue(qdev->ndev);
3179 }
3180
3181 return 0;
3182err_init:
3183 ql_adapter_reset(qdev);
3184 return err;
3185}
3186
3187static int ql_cycle_adapter(struct ql_adapter *qdev)
3188{
3189 int status;
3190
3191 status = ql_adapter_down(qdev);
3192 if (status)
3193 goto error;
3194
3195 status = ql_adapter_up(qdev);
3196 if (status)
3197 goto error;
3198
3199 return status;
3200error:
3201 QPRINTK(qdev, IFUP, ALERT,
3202 "Driver up/down cycle failed, closing device\n");
3203 rtnl_lock();
3204 dev_close(qdev->ndev);
3205 rtnl_unlock();
3206 return status;
3207}
3208
3209static void ql_release_adapter_resources(struct ql_adapter *qdev)
3210{
3211 ql_free_mem_resources(qdev);
3212 ql_free_irq(qdev);
3213}
3214
3215static int ql_get_adapter_resources(struct ql_adapter *qdev)
3216{
3217 int status = 0;
3218
3219 if (ql_alloc_mem_resources(qdev)) {
3220 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3221 return -ENOMEM;
3222 }
3223 status = ql_request_irq(qdev);
3224 if (status)
3225 goto err_irq;
3226 return status;
3227err_irq:
3228 ql_free_mem_resources(qdev);
3229 return status;
3230}
3231
3232static int qlge_close(struct net_device *ndev)
3233{
3234 struct ql_adapter *qdev = netdev_priv(ndev);
3235
3236 /*
3237 * Wait for device to recover from a reset.
3238 * (Rarely happens, but possible.)
3239 */
3240 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3241 msleep(1);
3242 ql_adapter_down(qdev);
3243 ql_release_adapter_resources(qdev);
3244 return 0;
3245}
3246
3247static int ql_configure_rings(struct ql_adapter *qdev)
3248{
3249 int i;
3250 struct rx_ring *rx_ring;
3251 struct tx_ring *tx_ring;
3252 int cpu_cnt = num_online_cpus();
3253
3254 /*
3255 * For each processor present we allocate one
3256 * rx_ring for outbound completions, and one
3257 * rx_ring for inbound completions. Plus there is
3258 * always the one default queue. For the CPU
3259 * counts we end up with the following rx_rings:
3260 * rx_ring count =
3261 * one default queue +
3262 * (CPU count * outbound completion rx_ring) +
3263 * (CPU count * inbound (RSS) completion rx_ring)
3264 * To keep it simple we limit the total number of
3265 * queues to < 32, so we truncate CPU to 8.
3266 * This limitation can be removed when requested.
3267 */
3268
3269 if (cpu_cnt > MAX_CPUS)
3270 cpu_cnt = MAX_CPUS;
3271
3272 /*
3273 * rx_ring[0] is always the default queue.
3274 */
3275 /* Allocate outbound completion ring for each CPU. */
3276 qdev->tx_ring_count = cpu_cnt;
3277 /* Allocate inbound completion (RSS) ring for each CPU. */
3278 qdev->rss_ring_count = cpu_cnt;
3279 /* cq_id for the first inbound ring handler. */
3280 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3281 /*
3282 * qdev->rx_ring_count:
3283 * Total number of rx_rings. This includes the one
3284 * default queue, a number of outbound completion
3285 * handler rx_rings, and the number of inbound
3286 * completion handler rx_rings.
3287 */
3288 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
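 /* Example: a 4-CPU system gets 4 tx_rings and 1 + 4 + 4 = 9
  * rx_rings, with the first RSS ring at cq_id 5.
  */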
3289
3290 for (i = 0; i < qdev->tx_ring_count; i++) {
3291 tx_ring = &qdev->tx_ring[i];
3292 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3293 tx_ring->qdev = qdev;
3294 tx_ring->wq_id = i;
3295 tx_ring->wq_len = qdev->tx_ring_size;
3296 tx_ring->wq_size =
3297 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3298
3299 /*
3300 * The completion queue ID for the tx rings start
3301 * immediately after the default Q ID, which is zero.
3302 */
3303 tx_ring->cq_id = i + 1;
3304 }
3305
3306 for (i = 0; i < qdev->rx_ring_count; i++) {
3307 rx_ring = &qdev->rx_ring[i];
3308 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3309 rx_ring->qdev = qdev;
3310 rx_ring->cq_id = i;
3311 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3312 if (i == 0) { /* Default queue at index 0. */
3313 /*
3314 * Default queue handles bcast/mcast plus
3315 * async events. Needs buffers.
3316 */
3317 rx_ring->cq_len = qdev->rx_ring_size;
3318 rx_ring->cq_size =
3319 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3320 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3321 rx_ring->lbq_size =
3322 rx_ring->lbq_len * sizeof(__le64);
3323 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3324 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3325 rx_ring->sbq_size =
3326 rx_ring->sbq_len * sizeof(__le64);
3327 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3328 rx_ring->type = DEFAULT_Q;
3329 } else if (i < qdev->rss_ring_first_cq_id) {
3330 /*
3331 * Outbound queue handles outbound completions only.
3332 */
3333 /* outbound cq is same size as tx_ring it services. */
3334 rx_ring->cq_len = qdev->tx_ring_size;
3335 rx_ring->cq_size =
3336 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3337 rx_ring->lbq_len = 0;
3338 rx_ring->lbq_size = 0;
3339 rx_ring->lbq_buf_size = 0;
3340 rx_ring->sbq_len = 0;
3341 rx_ring->sbq_size = 0;
3342 rx_ring->sbq_buf_size = 0;
3343 rx_ring->type = TX_Q;
3344 } else { /* Inbound completions (RSS) queues */
3345 /*
3346 * Inbound queues handle unicast frames only.
3347 */
3348 rx_ring->cq_len = qdev->rx_ring_size;
3349 rx_ring->cq_size =
3350 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3351 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3352 rx_ring->lbq_size =
3353 rx_ring->lbq_len * sizeof(__le64);
3354 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3355 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3356 rx_ring->sbq_size =
3357 rx_ring->sbq_len * sizeof(__le64);
3358 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3359 rx_ring->type = RX_Q;
3360 }
3361 }
3362 return 0;
3363}
3364
3365static int qlge_open(struct net_device *ndev)
3366{
3367 int err = 0;
3368 struct ql_adapter *qdev = netdev_priv(ndev);
3369
3370 err = ql_configure_rings(qdev);
3371 if (err)
3372 return err;
3373
3374 err = ql_get_adapter_resources(qdev);
3375 if (err)
3376 goto error_up;
3377
3378 err = ql_adapter_up(qdev);
3379 if (err)
3380 goto error_up;
3381
3382 return err;
3383
3384error_up:
3385 ql_release_adapter_resources(qdev);
3386 return err;
3387}
3388
3389static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3390{
3391 struct ql_adapter *qdev = netdev_priv(ndev);
3392
3393 if (ndev->mtu == 1500 && new_mtu == 9000) {
3394 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3395 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3396 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3397 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3398 (ndev->mtu == 9000 && new_mtu == 9000)) {
3399 return 0;
3400 } else
3401 return -EINVAL;
3402 ndev->mtu = new_mtu;
3403 return 0;
3404}
3405
3406static struct net_device_stats *qlge_get_stats(struct net_device
3407 *ndev)
3408{
3409 struct ql_adapter *qdev = netdev_priv(ndev);
3410 return &qdev->stats;
3411}
3412
3413static void qlge_set_multicast_list(struct net_device *ndev)
3414{
3415 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3416 struct dev_mc_list *mc_ptr;
3417 int i;
3418
3419 spin_lock(&qdev->hw_lock);
3420 /*
3421 * Set or clear promiscuous mode if a
3422 * transition is taking place.
3423 */
3424 if (ndev->flags & IFF_PROMISC) {
3425 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3426 if (ql_set_routing_reg
3427 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3428 QPRINTK(qdev, HW, ERR,
3429 "Failed to set promiscous mode.\n");
3430 } else {
3431 set_bit(QL_PROMISCUOUS, &qdev->flags);
3432 }
3433 }
3434 } else {
3435 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3436 if (ql_set_routing_reg
3437 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3438 QPRINTK(qdev, HW, ERR,
3439 "Failed to clear promiscous mode.\n");
3440 } else {
3441 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3442 }
3443 }
3444 }
3445
3446 /*
3447 * Set or clear all multicast mode if a
3448 * transition is taking place.
3449 */
3450 if ((ndev->flags & IFF_ALLMULTI) ||
3451 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3452 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3453 if (ql_set_routing_reg
3454 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3455 QPRINTK(qdev, HW, ERR,
3456 "Failed to set all-multi mode.\n");
3457 } else {
3458 set_bit(QL_ALLMULTI, &qdev->flags);
3459 }
3460 }
3461 } else {
3462 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3463 if (ql_set_routing_reg
3464 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3465 QPRINTK(qdev, HW, ERR,
3466 "Failed to clear all-multi mode.\n");
3467 } else {
3468 clear_bit(QL_ALLMULTI, &qdev->flags);
3469 }
3470 }
3471 }
3472
3473 if (ndev->mc_count) {
3474 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3475 i++, mc_ptr = mc_ptr->next)
3476 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3477 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3478 QPRINTK(qdev, HW, ERR,
3479 "Failed to loadmulticast address.\n");
3480 goto exit;
3481 }
3482 if (ql_set_routing_reg
3483 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3484 QPRINTK(qdev, HW, ERR,
3485 "Failed to set multicast match mode.\n");
3486 } else {
3487 set_bit(QL_ALLMULTI, &qdev->flags);
3488 }
3489 }
3490exit:
3491 spin_unlock(&qdev->hw_lock);
3492}
3493
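/*
 * ndo_set_mac_address handler.  The unicast address lives in a CAM
 * slot indexed by the PCI function number, so each port programs
 * its own entry.  The address can only be changed while the
 * interface is down.
 */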
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int ret = 0;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
		ret = -EIO;
	}
	spin_unlock(&qdev->hw_lock);

	return ret;
}

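/*
 * ndo_tx_timeout handler.  Queue an ASIC error; that stops the
 * queue, disables interrupts, and schedules the delayed reset work
 * (ql_asic_reset_work() below), which cycles the adapter.
 */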
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	ql_cycle_adapter(qdev);
}

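/*
 * Read the function ID from the status register and derive the
 * per-port resources from it: the XGMAC semaphore mask, the link
 * and init status bits, and the MPI mailbox addresses for this
 * function.
 */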
static void ql_get_board_info(struct ql_adapter *qdev)
{
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->q_workqueue) {
		destroy_workqueue(qdev->q_workqueue);
		qdev->q_workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int pos, err = 0;
	u16 val16;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

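	/*
	 * Tune the PCI Express device control register: disable
	 * no-snoop and enable reporting of correctable, non-fatal,
	 * fatal and unsupported-request errors.
	 */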
	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		err = -ENODEV;
		goto err_out;
	} else {
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
		val16 |= (PCI_EXP_DEVCTL_CERE |
			  PCI_EXP_DEVCTL_NFERE |
			  PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
		pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out;
	}

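	/*
	 * Prefer a 64-bit DMA mask and fall back to 32 bit.  The
	 * QL_DMA64 flag recorded here is used later in qlge_probe()
	 * to advertise NETIF_F_HIGHDMA.
	 */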
	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	pci_set_drvdata(pdev, ndev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	ql_get_board_info(qdev);
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = ql_get_flash_params(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	if (!is_valid_ether_addr(qdev->flash.mac_addr)) {
		dev_err(&pdev->dev, "Invalid MAC address in flash.\n");
		err = -EIO;
		goto err_out;
	}

	memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;

	qdev->q_workqueue = create_workqueue(ndev->name);
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}

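/*
 * Consolidated net_device callbacks.  These replace the individual
 * function pointers that were previously assigned one by one on
 * struct net_device before the net_device_ops conversion.
 */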
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= ql_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
};

static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev(sizeof(struct ql_adapter));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

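/*
 * PCI error recovery (AER/EEH) runs through the handlers below in
 * order: error_detected() quiesces the device and asks for a slot
 * reset, slot_reset() re-enables and resets the hardware, and
 * resume() restarts traffic if the interface was running.
 */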
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pci_set_master(pdev);

	if (netif_running(ndev)) {
		if (ql_adapter_up(qdev)) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	}

	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

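/*
 * Power management suspend path.  Note this is not wrapped in
 * CONFIG_PM because qlge_shutdown() reuses it unconditionally:
 * detach the device, bring the adapter down, drop the NAPI
 * instances, then save PCI state and power the slot down.
 */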
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err, i;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

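/*
 * Shutdown simply reuses the suspend path to quiesce the device
 * before power-off or reboot.
 */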
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);