/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER | */
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

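/* Example module load (hypothetical command line, not part of this file;
 * the bit values assume the standard NETIF_MSG_* definitions):
 *
 *	modprobe qlge debug=0x0007 irq_type=2
 *
 * which would limit logging to DRV/PROBE/LINK messages and force legacy
 * interrupts instead of MSI-X.
 */
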
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore enforces exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

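/* Typical acquire/release pattern (a sketch mirroring callers later in
 * this file, e.g. ql_get_flash_params()):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...touch the shared resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */
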
/* This function waits for a specific bit to come ready
 * in a given register. It is used mostly by the initialize
 * process, but is also used in kernel-thread context by APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id. Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes. Each one can be used
 * to route different frame types to various inbound queues. We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes. Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

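/* Note on the irq_cnt protocol above: for MSI/legacy interrupts (and for
 * MSI-X vector zero) ql_disable_completion_interrupt() bumps irq_cnt,
 * while ql_enable_completion_interrupt() only re-enables the interrupt
 * when its atomic decrement brings the count back to zero, so nested
 * disable/enable pairs issued by multiple workers balance out.
 */
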
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32. Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->func)
		offset = sizeof(qdev->flash) / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}

	}
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

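/* Sketch of a typical caller (the register offset below is hypothetical;
 * real offsets come from the XGMAC statistics definitions, presumably in
 * qlge.h):
 *
 *	u64 frames;
 *	if (ql_read_xgmac_reg64(qdev, 0x200, &frames) == 0)
 *		...frames now holds the lo/hi register pair as one value...
 */
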
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					rx_ring->lbq_clean_idx = clean_idx;
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					rx_ring->lbq_clean_idx = clean_idx;
					put_page(lbq_desc->p.lbq_page);
					lbq_desc->p.lbq_page = NULL;
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers. Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit. This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB. If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames. If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		__le64 *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (__le64 *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->
			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
					 le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_receive_skb(skb);
	}
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			{
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"Hit default case, not handled! dropping the packet, opcode = %x.\n",
					net_rsp->opcode);
			}
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	if (work_done < budget) {
		__napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		QPRINTK(qdev, IFUP, DEBUG,
			"Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	spin_lock(&qdev->hw_lock);
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	spin_unlock(&qdev->hw_lock);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

}

1716/* Worker thread to process a given rx_ring that is dedicated
1717 * to outbound completions.
1718 */
1719static void ql_tx_clean(struct work_struct *work)
1720{
1721 struct rx_ring *rx_ring =
1722 container_of(work, struct rx_ring, rx_work.work);
1723 ql_clean_outbound_rx_ring(rx_ring);
1724 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1725
1726}
1727
1728/* Worker thread to process a given rx_ring that is dedicated
1729 * to inbound completions.
1730 */
1731static void ql_rx_clean(struct work_struct *work)
1732{
1733 struct rx_ring *rx_ring =
1734 container_of(work, struct rx_ring, rx_work.work);
1735 ql_clean_inbound_rx_ring(rx_ring, 64);
1736 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1737}
1738
1739/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1740static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1741{
1742 struct rx_ring *rx_ring = dev_id;
1743 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1744 &rx_ring->rx_work, 0);
1745 return IRQ_HANDLED;
1746}
1747
1748/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1749static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1750{
1751 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08001752 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001753 return IRQ_HANDLED;
1754}
1755
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001756/* This handles a fatal error, MPI activity, and the default
1757 * rx_ring in an MSI-X multiple vector environment.
   1758  * In an MSI/Legacy environment it also processes the rest of
1759 * the rx_rings.
1760 */
1761static irqreturn_t qlge_isr(int irq, void *dev_id)
1762{
1763 struct rx_ring *rx_ring = dev_id;
1764 struct ql_adapter *qdev = rx_ring->qdev;
1765 struct intr_context *intr_context = &qdev->intr_context[0];
1766 u32 var;
1767 int i;
1768 int work_done = 0;
1769
Ron Mercerbb0d2152008-10-20 10:30:26 -07001770 spin_lock(&qdev->hw_lock);
1771 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1772 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1773 spin_unlock(&qdev->hw_lock);
1774 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001775 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07001776 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001777
Ron Mercerbb0d2152008-10-20 10:30:26 -07001778 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001779
1780 /*
1781 * Check for fatal error.
1782 */
1783 if (var & STS_FE) {
1784 ql_queue_asic_error(qdev);
1785 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1786 var = ql_read32(qdev, ERR_STS);
1787 QPRINTK(qdev, INTR, ERR,
1788 "Resetting chip. Error Status Register = 0x%x\n", var);
1789 return IRQ_HANDLED;
1790 }
1791
1792 /*
1793 * Check MPI processor activity.
1794 */
1795 if (var & STS_PI) {
1796 /*
1797 * We've got an async event or mailbox completion.
1798 * Handle it and clear the source of the interrupt.
1799 */
1800 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1801 ql_disable_completion_interrupt(qdev, intr_context->intr);
1802 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1803 &qdev->mpi_work, 0);
1804 work_done++;
1805 }
1806
1807 /*
1808 * Check the default queue and wake handler if active.
1809 */
1810 rx_ring = &qdev->rx_ring[0];
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001811 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001812 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1813 ql_disable_completion_interrupt(qdev, intr_context->intr);
1814 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1815 &rx_ring->rx_work, 0);
1816 work_done++;
1817 }
1818
1819 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1820 /*
1821 * Start the DPC for each active queue.
1822 */
1823 for (i = 1; i < qdev->rx_ring_count; i++) {
1824 rx_ring = &qdev->rx_ring[i];
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001825 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001826 rx_ring->cnsmr_idx) {
1827 QPRINTK(qdev, INTR, INFO,
1828 "Waking handler for rx_ring[%d].\n", i);
1829 ql_disable_completion_interrupt(qdev,
1830 intr_context->
1831 intr);
1832 if (i < qdev->rss_ring_first_cq_id)
1833 queue_delayed_work_on(rx_ring->cpu,
1834 qdev->q_workqueue,
1835 &rx_ring->rx_work,
1836 0);
1837 else
Ben Hutchings288379f2009-01-19 16:43:59 -08001838 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001839 work_done++;
1840 }
1841 }
1842 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07001843 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001844 return work_done ? IRQ_HANDLED : IRQ_NONE;
1845}
1846
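/* Fill in the TSO fields of the outbound IOCB when the skb is GSO.
 * Returns 1 if TSO was set up, 0 if the frame is not GSO, or a
 * negative errno if the cloned header could not be expanded.
 */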
1847static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1848{
1849
1850 if (skb_is_gso(skb)) {
1851 int err;
1852 if (skb_header_cloned(skb)) {
1853 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1854 if (err)
1855 return err;
1856 }
1857
1858 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1859 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1860 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1861 mac_iocb_ptr->total_hdrs_len =
1862 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1863 mac_iocb_ptr->net_trans_offset =
1864 cpu_to_le16(skb_network_offset(skb) |
1865 skb_transport_offset(skb)
1866 << OB_MAC_TRANSPORT_HDR_SHIFT);
1867 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1868 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1869 if (likely(skb->protocol == htons(ETH_P_IP))) {
1870 struct iphdr *iph = ip_hdr(skb);
1871 iph->check = 0;
1872 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1873 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1874 iph->daddr, 0,
1875 IPPROTO_TCP,
1876 0);
1877 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1878 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1879 tcp_hdr(skb)->check =
1880 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1881 &ipv6_hdr(skb)->daddr,
1882 0, IPPROTO_TCP, 0);
1883 }
1884 return 1;
1885 }
1886 return 0;
1887}
1888
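/* Set up hardware TCP/UDP checksum offload for an IPv4 frame. The
 * pseudo-header checksum is seeded here and the chip completes it.
 */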
1889static void ql_hw_csum_setup(struct sk_buff *skb,
1890 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1891{
1892 int len;
1893 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08001894 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001895 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1896 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1897 mac_iocb_ptr->net_trans_offset =
1898 cpu_to_le16(skb_network_offset(skb) |
1899 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1900
1901 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1902 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1903 if (likely(iph->protocol == IPPROTO_TCP)) {
1904 check = &(tcp_hdr(skb)->check);
1905 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1906 mac_iocb_ptr->total_hdrs_len =
1907 cpu_to_le16(skb_transport_offset(skb) +
1908 (tcp_hdr(skb)->doff << 2));
1909 } else {
1910 check = &(udp_hdr(skb)->check);
1911 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1912 mac_iocb_ptr->total_hdrs_len =
1913 cpu_to_le16(skb_transport_offset(skb) +
1914 sizeof(struct udphdr));
1915 }
1916 *check = ~csum_tcpudp_magic(iph->saddr,
1917 iph->daddr, len, iph->protocol, 0);
1918}
1919
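/* Main transmit entry point. Builds the MAC IOCB (including optional
 * VLAN tag, TSO or checksum offload), maps the skb and rings the
 * tx doorbell.
 */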
1920static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1921{
1922 struct tx_ring_desc *tx_ring_desc;
1923 struct ob_mac_iocb_req *mac_iocb_ptr;
1924 struct ql_adapter *qdev = netdev_priv(ndev);
1925 int tso;
1926 struct tx_ring *tx_ring;
1927 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1928
1929 tx_ring = &qdev->tx_ring[tx_ring_idx];
1930
1931 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1932 QPRINTK(qdev, TX_QUEUED, INFO,
   1933			"%s: shutting down tx queue %d due to lack of resources.\n",
1934 __func__, tx_ring_idx);
1935 netif_stop_queue(ndev);
1936 atomic_inc(&tx_ring->queue_stopped);
1937 return NETDEV_TX_BUSY;
1938 }
1939 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1940 mac_iocb_ptr = tx_ring_desc->queue_entry;
   1941	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001942
1943 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1944 mac_iocb_ptr->tid = tx_ring_desc->index;
1945 /* We use the upper 32-bits to store the tx queue for this IO.
1946 * When we get the completion we can use it to establish the context.
1947 */
1948 mac_iocb_ptr->txq_idx = tx_ring_idx;
1949 tx_ring_desc->skb = skb;
1950
1951 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1952
1953 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1954 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1955 vlan_tx_tag_get(skb));
1956 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1957 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1958 }
1959 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1960 if (tso < 0) {
1961 dev_kfree_skb_any(skb);
1962 return NETDEV_TX_OK;
1963 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1964 ql_hw_csum_setup(skb,
1965 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1966 }
Ron Mercer0d979f72009-02-12 16:38:03 -08001967 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
1968 NETDEV_TX_OK) {
1969 QPRINTK(qdev, TX_QUEUED, ERR,
1970 "Could not map the segments.\n");
1971 return NETDEV_TX_BUSY;
1972 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001973 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1974 tx_ring->prod_idx++;
1975 if (tx_ring->prod_idx == tx_ring->wq_len)
1976 tx_ring->prod_idx = 0;
1977 wmb();
1978
1979 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1980 ndev->trans_start = jiffies;
1981 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1982 tx_ring->prod_idx, skb->len);
1983
1984 atomic_dec(&tx_ring->tx_count);
1985 return NETDEV_TX_OK;
1986}
1987
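/* Shadow register space is DMA-able memory shared with the chip; it
 * holds the completion queue index shadows and the buffer queue
 * indirection entries. One page is used for rx and one for tx.
 */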
1988static void ql_free_shadow_space(struct ql_adapter *qdev)
1989{
1990 if (qdev->rx_ring_shadow_reg_area) {
1991 pci_free_consistent(qdev->pdev,
1992 PAGE_SIZE,
1993 qdev->rx_ring_shadow_reg_area,
1994 qdev->rx_ring_shadow_reg_dma);
1995 qdev->rx_ring_shadow_reg_area = NULL;
1996 }
1997 if (qdev->tx_ring_shadow_reg_area) {
1998 pci_free_consistent(qdev->pdev,
1999 PAGE_SIZE,
2000 qdev->tx_ring_shadow_reg_area,
2001 qdev->tx_ring_shadow_reg_dma);
2002 qdev->tx_ring_shadow_reg_area = NULL;
2003 }
2004}
2005
2006static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2007{
2008 qdev->rx_ring_shadow_reg_area =
2009 pci_alloc_consistent(qdev->pdev,
2010 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2011 if (qdev->rx_ring_shadow_reg_area == NULL) {
2012 QPRINTK(qdev, IFUP, ERR,
2013 "Allocation of RX shadow space failed.\n");
2014 return -ENOMEM;
2015 }
2016 qdev->tx_ring_shadow_reg_area =
2017 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2018 &qdev->tx_ring_shadow_reg_dma);
2019 if (qdev->tx_ring_shadow_reg_area == NULL) {
2020 QPRINTK(qdev, IFUP, ERR,
2021 "Allocation of TX shadow space failed.\n");
2022 goto err_wqp_sh_area;
2023 }
2024 return 0;
2025
2026err_wqp_sh_area:
2027 pci_free_consistent(qdev->pdev,
2028 PAGE_SIZE,
2029 qdev->rx_ring_shadow_reg_area,
2030 qdev->rx_ring_shadow_reg_dma);
2031 return -ENOMEM;
2032}
2033
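/* Link each tx descriptor to its IOCB slot in the work queue and
 * reset the ring's free-slot accounting.
 */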
2034static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2035{
2036 struct tx_ring_desc *tx_ring_desc;
2037 int i;
2038 struct ob_mac_iocb_req *mac_iocb_ptr;
2039
2040 mac_iocb_ptr = tx_ring->wq_base;
2041 tx_ring_desc = tx_ring->q;
2042 for (i = 0; i < tx_ring->wq_len; i++) {
2043 tx_ring_desc->index = i;
2044 tx_ring_desc->skb = NULL;
2045 tx_ring_desc->queue_entry = mac_iocb_ptr;
2046 mac_iocb_ptr++;
2047 tx_ring_desc++;
2048 }
2049 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2050 atomic_set(&tx_ring->queue_stopped, 0);
2051}
2052
2053static void ql_free_tx_resources(struct ql_adapter *qdev,
2054 struct tx_ring *tx_ring)
2055{
2056 if (tx_ring->wq_base) {
2057 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2058 tx_ring->wq_base, tx_ring->wq_base_dma);
2059 tx_ring->wq_base = NULL;
2060 }
2061 kfree(tx_ring->q);
2062 tx_ring->q = NULL;
2063}
2064
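/* Allocate the DMA-coherent work queue and the driver-side descriptor
 * array for a tx ring. The work queue must be aligned to its own size,
 * which the check below enforces.
 */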
2065static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2066 struct tx_ring *tx_ring)
2067{
2068 tx_ring->wq_base =
2069 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2070 &tx_ring->wq_base_dma);
2071
2072 if ((tx_ring->wq_base == NULL)
2073 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2074 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2075 return -ENOMEM;
2076 }
2077 tx_ring->q =
2078 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2079 if (tx_ring->q == NULL)
2080 goto err;
2081
2082 return 0;
2083err:
2084 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2085 tx_ring->wq_base, tx_ring->wq_base_dma);
2086 return -ENOMEM;
2087}
2088
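/* Unmap and release any pages currently posted to the large buffer queue. */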
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002089static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002090{
2091 int i;
2092 struct bq_desc *lbq_desc;
2093
2094 for (i = 0; i < rx_ring->lbq_len; i++) {
2095 lbq_desc = &rx_ring->lbq[i];
2096 if (lbq_desc->p.lbq_page) {
2097 pci_unmap_page(qdev->pdev,
2098 pci_unmap_addr(lbq_desc, mapaddr),
2099 pci_unmap_len(lbq_desc, maplen),
2100 PCI_DMA_FROMDEVICE);
2101
2102 put_page(lbq_desc->p.lbq_page);
2103 lbq_desc->p.lbq_page = NULL;
2104 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002105 }
2106}
2107
2108/*
2109 * Allocate and map a page for each element of the lbq.
2110 */
2111static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2112 struct rx_ring *rx_ring)
2113{
2114 int i;
2115 struct bq_desc *lbq_desc;
2116 u64 map;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002117 __le64 *bq = rx_ring->lbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002118
2119 for (i = 0; i < rx_ring->lbq_len; i++) {
2120 lbq_desc = &rx_ring->lbq[i];
   2121		memset(lbq_desc, 0, sizeof(*lbq_desc));
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002122 lbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 lbq_desc->index = i;
2124 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2125 if (unlikely(!lbq_desc->p.lbq_page)) {
2126 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2127 goto mem_error;
2128 } else {
2129 map = pci_map_page(qdev->pdev,
2130 lbq_desc->p.lbq_page,
2131 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2132 if (pci_dma_mapping_error(qdev->pdev, map)) {
2133 QPRINTK(qdev, IFUP, ERR,
2134 "PCI mapping failed.\n");
2135 goto mem_error;
2136 }
2137 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2138 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002139 *lbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002140 }
2141 bq++;
2142 }
2143 return 0;
2144mem_error:
2145 ql_free_lbq_buffers(qdev, rx_ring);
2146 return -ENOMEM;
2147}
2148
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002149static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002150{
2151 int i;
2152 struct bq_desc *sbq_desc;
2153
2154 for (i = 0; i < rx_ring->sbq_len; i++) {
2155 sbq_desc = &rx_ring->sbq[i];
2156 if (sbq_desc == NULL) {
2157 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2158 return;
2159 }
2160 if (sbq_desc->p.skb) {
2161 pci_unmap_single(qdev->pdev,
2162 pci_unmap_addr(sbq_desc, mapaddr),
2163 pci_unmap_len(sbq_desc, maplen),
2164 PCI_DMA_FROMDEVICE);
2165 dev_kfree_skb(sbq_desc->p.skb);
2166 sbq_desc->p.skb = NULL;
2167 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002168 }
2169}
2170
2171/* Allocate and map an skb for each element of the sbq. */
2172static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2173 struct rx_ring *rx_ring)
2174{
2175 int i;
2176 struct bq_desc *sbq_desc;
2177 struct sk_buff *skb;
2178 u64 map;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002179 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002180
2181 for (i = 0; i < rx_ring->sbq_len; i++) {
2182 sbq_desc = &rx_ring->sbq[i];
   2183		memset(sbq_desc, 0, sizeof(*sbq_desc));
2184 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002185 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002186 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2187 if (unlikely(!skb)) {
2188 /* Better luck next round */
2189 QPRINTK(qdev, IFUP, ERR,
2190 "small buff alloc failed for %d bytes at index %d.\n",
2191 rx_ring->sbq_buf_size, i);
2192 goto mem_err;
2193 }
2194 skb_reserve(skb, QLGE_SB_PAD);
2195 sbq_desc->p.skb = skb;
2196 /*
   2197		 * Map only half the buffer, because the
2198 * other half may get some data copied to it
2199 * when the completion arrives.
2200 */
2201 map = pci_map_single(qdev->pdev,
2202 skb->data,
2203 rx_ring->sbq_buf_size / 2,
2204 PCI_DMA_FROMDEVICE);
2205 if (pci_dma_mapping_error(qdev->pdev, map)) {
2206 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2207 goto mem_err;
2208 }
2209 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2210 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002211 *sbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002212 bq++;
2213 }
2214 return 0;
2215mem_err:
2216 ql_free_sbq_buffers(qdev, rx_ring);
2217 return -ENOMEM;
2218}
2219
2220static void ql_free_rx_resources(struct ql_adapter *qdev,
2221 struct rx_ring *rx_ring)
2222{
2223 if (rx_ring->sbq_len)
2224 ql_free_sbq_buffers(qdev, rx_ring);
2225 if (rx_ring->lbq_len)
2226 ql_free_lbq_buffers(qdev, rx_ring);
2227
2228 /* Free the small buffer queue. */
2229 if (rx_ring->sbq_base) {
2230 pci_free_consistent(qdev->pdev,
2231 rx_ring->sbq_size,
2232 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2233 rx_ring->sbq_base = NULL;
2234 }
2235
2236 /* Free the small buffer queue control blocks. */
2237 kfree(rx_ring->sbq);
2238 rx_ring->sbq = NULL;
2239
2240 /* Free the large buffer queue. */
2241 if (rx_ring->lbq_base) {
2242 pci_free_consistent(qdev->pdev,
2243 rx_ring->lbq_size,
2244 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2245 rx_ring->lbq_base = NULL;
2246 }
2247
2248 /* Free the large buffer queue control blocks. */
2249 kfree(rx_ring->lbq);
2250 rx_ring->lbq = NULL;
2251
2252 /* Free the rx queue. */
2253 if (rx_ring->cq_base) {
2254 pci_free_consistent(qdev->pdev,
2255 rx_ring->cq_size,
2256 rx_ring->cq_base, rx_ring->cq_base_dma);
2257 rx_ring->cq_base = NULL;
2258 }
2259}
2260
2261/* Allocate queues and buffers for this completions queue based
2262 * on the values in the parameter structure. */
2263static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2264 struct rx_ring *rx_ring)
2265{
2266
2267 /*
2268 * Allocate the completion queue for this rx_ring.
2269 */
2270 rx_ring->cq_base =
2271 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2272 &rx_ring->cq_base_dma);
2273
2274 if (rx_ring->cq_base == NULL) {
2275 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2276 return -ENOMEM;
2277 }
2278
2279 if (rx_ring->sbq_len) {
2280 /*
2281 * Allocate small buffer queue.
2282 */
2283 rx_ring->sbq_base =
2284 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2285 &rx_ring->sbq_base_dma);
2286
2287 if (rx_ring->sbq_base == NULL) {
2288 QPRINTK(qdev, IFUP, ERR,
2289 "Small buffer queue allocation failed.\n");
2290 goto err_mem;
2291 }
2292
2293 /*
2294 * Allocate small buffer queue control blocks.
2295 */
2296 rx_ring->sbq =
2297 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2298 GFP_KERNEL);
2299 if (rx_ring->sbq == NULL) {
2300 QPRINTK(qdev, IFUP, ERR,
2301 "Small buffer queue control block allocation failed.\n");
2302 goto err_mem;
2303 }
2304
2305 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2306 QPRINTK(qdev, IFUP, ERR,
2307 "Small buffer allocation failed.\n");
2308 goto err_mem;
2309 }
2310 }
2311
2312 if (rx_ring->lbq_len) {
2313 /*
2314 * Allocate large buffer queue.
2315 */
2316 rx_ring->lbq_base =
2317 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2318 &rx_ring->lbq_base_dma);
2319
2320 if (rx_ring->lbq_base == NULL) {
2321 QPRINTK(qdev, IFUP, ERR,
2322 "Large buffer queue allocation failed.\n");
2323 goto err_mem;
2324 }
2325 /*
2326 * Allocate large buffer queue control blocks.
2327 */
2328 rx_ring->lbq =
2329 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2330 GFP_KERNEL);
2331 if (rx_ring->lbq == NULL) {
2332 QPRINTK(qdev, IFUP, ERR,
2333 "Large buffer queue control block allocation failed.\n");
2334 goto err_mem;
2335 }
2336
2337 /*
2338 * Allocate the buffers.
2339 */
2340 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2341 QPRINTK(qdev, IFUP, ERR,
2342 "Large buffer allocation failed.\n");
2343 goto err_mem;
2344 }
2345 }
2346
2347 return 0;
2348
2349err_mem:
2350 ql_free_rx_resources(qdev, rx_ring);
2351 return -ENOMEM;
2352}
2353
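/* Free any skbs still outstanding on the tx rings. Called on the way
 * down to reclaim buffers the chip never completed.
 */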
2354static void ql_tx_ring_clean(struct ql_adapter *qdev)
2355{
2356 struct tx_ring *tx_ring;
2357 struct tx_ring_desc *tx_ring_desc;
2358 int i, j;
2359
2360 /*
2361 * Loop through all queues and free
2362 * any resources.
2363 */
2364 for (j = 0; j < qdev->tx_ring_count; j++) {
2365 tx_ring = &qdev->tx_ring[j];
2366 for (i = 0; i < tx_ring->wq_len; i++) {
2367 tx_ring_desc = &tx_ring->q[i];
2368 if (tx_ring_desc && tx_ring_desc->skb) {
2369 QPRINTK(qdev, IFDOWN, ERR,
2370 "Freeing lost SKB %p, from queue %d, index %d.\n",
2371 tx_ring_desc->skb, j,
2372 tx_ring_desc->index);
2373 ql_unmap_send(qdev, tx_ring_desc,
2374 tx_ring_desc->map_cnt);
2375 dev_kfree_skb(tx_ring_desc->skb);
2376 tx_ring_desc->skb = NULL;
2377 }
2378 }
2379 }
2380}
2381
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002382static void ql_free_mem_resources(struct ql_adapter *qdev)
2383{
2384 int i;
2385
2386 for (i = 0; i < qdev->tx_ring_count; i++)
2387 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2388 for (i = 0; i < qdev->rx_ring_count; i++)
2389 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2390 ql_free_shadow_space(qdev);
2391}
2392
2393static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2394{
2395 int i;
2396
2397 /* Allocate space for our shadow registers and such. */
2398 if (ql_alloc_shadow_space(qdev))
2399 return -ENOMEM;
2400
2401 for (i = 0; i < qdev->rx_ring_count; i++) {
2402 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2403 QPRINTK(qdev, IFUP, ERR,
2404 "RX resource allocation failed.\n");
2405 goto err_mem;
2406 }
2407 }
2408 /* Allocate tx queue resources */
2409 for (i = 0; i < qdev->tx_ring_count; i++) {
2410 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2411 QPRINTK(qdev, IFUP, ERR,
2412 "TX resource allocation failed.\n");
2413 goto err_mem;
2414 }
2415 }
2416 return 0;
2417
2418err_mem:
2419 ql_free_mem_resources(qdev);
2420 return -ENOMEM;
2421}
2422
2423/* Set up the rx ring control block and pass it to the chip.
2424 * The control block is defined as
2425 * "Completion Queue Initialization Control Block", or cqicb.
2426 */
2427static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2428{
2429 struct cqicb *cqicb = &rx_ring->cqicb;
2430 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2431 (rx_ring->cq_id * sizeof(u64) * 4);
2432 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2433 (rx_ring->cq_id * sizeof(u64) * 4);
2434 void __iomem *doorbell_area =
2435 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2436 int err = 0;
2437 u16 bq_len;
2438
2439 /* Set up the shadow registers for this ring. */
2440 rx_ring->prod_idx_sh_reg = shadow_reg;
2441 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2442 shadow_reg += sizeof(u64);
2443 shadow_reg_dma += sizeof(u64);
2444 rx_ring->lbq_base_indirect = shadow_reg;
2445 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2446 shadow_reg += sizeof(u64);
2447 shadow_reg_dma += sizeof(u64);
2448 rx_ring->sbq_base_indirect = shadow_reg;
2449 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2450
2451 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002452 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002453 rx_ring->cnsmr_idx = 0;
2454 rx_ring->curr_entry = rx_ring->cq_base;
2455
2456 /* PCI doorbell mem area + 0x04 for valid register */
2457 rx_ring->valid_db_reg = doorbell_area + 0x04;
2458
2459 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002460 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002461
2462 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002463 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002464
2465 memset((void *)cqicb, 0, sizeof(struct cqicb));
2466 cqicb->msix_vect = rx_ring->irq;
2467
Ron Mercer459caf52009-01-04 17:08:11 -08002468 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2469 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002470
Ron Mercer97345522009-01-09 11:31:50 +00002471 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002472
Ron Mercer97345522009-01-09 11:31:50 +00002473 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002474
2475 /*
2476 * Set up the control block load flags.
2477 */
2478 cqicb->flags = FLAGS_LC | /* Load queue base address */
2479 FLAGS_LV | /* Load MSI-X vector */
2480 FLAGS_LI; /* Load irq delay values */
2481 if (rx_ring->lbq_len) {
2482 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2483 *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
Ron Mercer97345522009-01-09 11:31:50 +00002484 cqicb->lbq_addr =
2485 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002486 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2487 (u16) rx_ring->lbq_buf_size;
2488 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2489 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2490 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002491 cqicb->lbq_len = cpu_to_le16(bq_len);
2492 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2493 rx_ring->lbq_curr_idx = 0;
2494 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2495 rx_ring->lbq_free_cnt = 16;
2496 }
2497 if (rx_ring->sbq_len) {
2498 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2499 *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
Ron Mercer97345522009-01-09 11:31:50 +00002500 cqicb->sbq_addr =
2501 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002502 cqicb->sbq_buf_size =
2503 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
Ron Mercer459caf52009-01-04 17:08:11 -08002504 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2505 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002506 cqicb->sbq_len = cpu_to_le16(bq_len);
2507 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2508 rx_ring->sbq_curr_idx = 0;
2509 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2510 rx_ring->sbq_free_cnt = 16;
2511 }
2512 switch (rx_ring->type) {
2513 case TX_Q:
2514 /* If there's only one interrupt, then we use
2515 * worker threads to process the outbound
2516 * completion handling rx_rings. We do this so
2517 * they can be run on multiple CPUs. There is
2518 * room to play with this more where we would only
2519 * run in a worker if there are more than x number
2520 * of outbound completions on the queue and more
2521 * than one queue active. Some threshold that
2522 * would indicate a benefit in spite of the cost
2523 * of a context switch.
2524 * If there's more than one interrupt, then the
2525 * outbound completions are processed in the ISR.
2526 */
2527 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2528 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2529 else {
2530 /* With all debug warnings on we see a WARN_ON message
2531 * when we free the skb in the interrupt context.
2532 */
2533 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2534 }
2535 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2536 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2537 break;
2538 case DEFAULT_Q:
2539 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2540 cqicb->irq_delay = 0;
2541 cqicb->pkt_delay = 0;
2542 break;
2543 case RX_Q:
2544 /* Inbound completion handling rx_rings run in
2545 * separate NAPI contexts.
2546 */
2547 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2548 64);
2549 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2550 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2551 break;
2552 default:
2553 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2554 rx_ring->type);
2555 }
2556 QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2557 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2558 CFG_LCQ, rx_ring->cq_id);
2559 if (err) {
2560 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2561 return err;
2562 }
2563 QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2564 /*
2565 * Advance the producer index for the buffer queues.
2566 */
2567 wmb();
2568 if (rx_ring->lbq_len)
2569 ql_write_db_reg(rx_ring->lbq_prod_idx,
2570 rx_ring->lbq_prod_idx_db_reg);
2571 if (rx_ring->sbq_len)
2572 ql_write_db_reg(rx_ring->sbq_prod_idx,
2573 rx_ring->sbq_prod_idx_db_reg);
2574 return err;
2575}
2576
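/* Set up the tx ring control block (wqicb) and pass it to the chip.
 * This also assigns the ring's doorbell and shadow registers.
 */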
2577static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2578{
2579 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2580 void __iomem *doorbell_area =
2581 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2582 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2583 (tx_ring->wq_id * sizeof(u64));
2584 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2585 (tx_ring->wq_id * sizeof(u64));
2586 int err = 0;
2587
2588 /*
2589 * Assign doorbell registers for this tx_ring.
2590 */
2591 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002592 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002593 tx_ring->prod_idx = 0;
2594 /* TX PCI doorbell mem area + 0x04 */
2595 tx_ring->valid_db_reg = doorbell_area + 0x04;
2596
2597 /*
2598 * Assign shadow registers for this tx_ring.
2599 */
2600 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2601 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2602
2603 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2604 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2605 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2606 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2607 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002608 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002609
Ron Mercer97345522009-01-09 11:31:50 +00002610 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002611
2612 ql_init_tx_ring(qdev, tx_ring);
2613
   2614	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2615 (u16) tx_ring->wq_id);
2616 if (err) {
2617 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2618 return err;
2619 }
2620 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2621 return err;
2622}
2623
2624static void ql_disable_msix(struct ql_adapter *qdev)
2625{
2626 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2627 pci_disable_msix(qdev->pdev);
2628 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2629 kfree(qdev->msi_x_entry);
2630 qdev->msi_x_entry = NULL;
2631 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2632 pci_disable_msi(qdev->pdev);
2633 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2634 }
2635}
2636
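/* Try to get one MSI-X vector per rx_ring, falling back to MSI and
 * then to legacy interrupts if that fails.
 */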
2637static void ql_enable_msix(struct ql_adapter *qdev)
2638{
2639 int i;
2640
2641 qdev->intr_count = 1;
2642 /* Get the MSIX vectors. */
2643 if (irq_type == MSIX_IRQ) {
2644 /* Try to alloc space for the msix struct,
2645 * if it fails then go to MSI/legacy.
2646 */
2647 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2648 sizeof(struct msix_entry),
2649 GFP_KERNEL);
2650 if (!qdev->msi_x_entry) {
2651 irq_type = MSI_IRQ;
2652 goto msi;
2653 }
2654
2655 for (i = 0; i < qdev->rx_ring_count; i++)
2656 qdev->msi_x_entry[i].entry = i;
2657
2658 if (!pci_enable_msix
2659 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2660 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2661 qdev->intr_count = qdev->rx_ring_count;
2662 QPRINTK(qdev, IFUP, INFO,
2663 "MSI-X Enabled, got %d vectors.\n",
2664 qdev->intr_count);
2665 return;
2666 } else {
2667 kfree(qdev->msi_x_entry);
2668 qdev->msi_x_entry = NULL;
2669 QPRINTK(qdev, IFUP, WARNING,
2670 "MSI-X Enable failed, trying MSI.\n");
2671 irq_type = MSI_IRQ;
2672 }
2673 }
2674msi:
2675 if (irq_type == MSI_IRQ) {
2676 if (!pci_enable_msi(qdev->pdev)) {
2677 set_bit(QL_MSI_ENABLED, &qdev->flags);
2678 QPRINTK(qdev, IFUP, INFO,
2679 "Running with MSI interrupts.\n");
2680 return;
2681 }
2682 }
2683 irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002684 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2685}
2686
2687/*
2688 * Here we build the intr_context structures based on
2689 * our rx_ring count and intr vector count.
2690 * The intr_context structure is used to hook each vector
2691 * to possibly different handlers.
2692 */
2693static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2694{
2695 int i = 0;
2696 struct intr_context *intr_context = &qdev->intr_context[0];
2697
2698 ql_enable_msix(qdev);
2699
2700 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
   2701		/* Each rx_ring has its
2702 * own intr_context since we have separate
2703 * vectors for each queue.
   2704		 * This is only true when MSI-X is enabled.
2705 */
2706 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2707 qdev->rx_ring[i].irq = i;
2708 intr_context->intr = i;
2709 intr_context->qdev = qdev;
2710 /*
   2711			 * We set up each vector's enable/disable/read bits so
2712 * there's no bit/mask calculations in the critical path.
2713 */
2714 intr_context->intr_en_mask =
2715 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2716 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2717 | i;
2718 intr_context->intr_dis_mask =
2719 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2720 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2721 INTR_EN_IHD | i;
2722 intr_context->intr_read_mask =
2723 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2724 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2725 i;
2726
2727 if (i == 0) {
2728 /*
2729 * Default queue handles bcast/mcast plus
2730 * async events. Needs buffers.
2731 */
2732 intr_context->handler = qlge_isr;
2733 sprintf(intr_context->name, "%s-default-queue",
2734 qdev->ndev->name);
2735 } else if (i < qdev->rss_ring_first_cq_id) {
2736 /*
2737 * Outbound queue is for outbound completions only.
2738 */
2739 intr_context->handler = qlge_msix_tx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00002740 sprintf(intr_context->name, "%s-tx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002741 qdev->ndev->name, i);
2742 } else {
2743 /*
2744 * Inbound queues handle unicast frames only.
2745 */
2746 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00002747 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002748 qdev->ndev->name, i);
2749 }
2750 }
2751 } else {
2752 /*
2753 * All rx_rings use the same intr_context since
2754 * there is only one vector.
2755 */
2756 intr_context->intr = 0;
2757 intr_context->qdev = qdev;
2758 /*
   2759		 * We set up each vector's enable/disable/read bits so
2760 * there's no bit/mask calculations in the critical path.
2761 */
2762 intr_context->intr_en_mask =
2763 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2764 intr_context->intr_dis_mask =
2765 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2766 INTR_EN_TYPE_DISABLE;
2767 intr_context->intr_read_mask =
2768 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2769 /*
2770 * Single interrupt means one handler for all rings.
2771 */
2772 intr_context->handler = qlge_isr;
2773 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2774 for (i = 0; i < qdev->rx_ring_count; i++)
2775 qdev->rx_ring[i].irq = 0;
2776 }
2777}
2778
2779static void ql_free_irq(struct ql_adapter *qdev)
2780{
2781 int i;
2782 struct intr_context *intr_context = &qdev->intr_context[0];
2783
2784 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2785 if (intr_context->hooked) {
2786 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2787 free_irq(qdev->msi_x_entry[i].vector,
2788 &qdev->rx_ring[i]);
2789 QPRINTK(qdev, IFDOWN, ERR,
2790 "freeing msix interrupt %d.\n", i);
2791 } else {
2792 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2793 QPRINTK(qdev, IFDOWN, ERR,
2794 "freeing msi interrupt %d.\n", i);
2795 }
2796 }
2797 }
2798 ql_disable_msix(qdev);
2799}
2800
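/* Hook an interrupt handler for each vector. With MSI-X every rx_ring
 * gets its own vector; otherwise one handler services all rings.
 */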
2801static int ql_request_irq(struct ql_adapter *qdev)
2802{
2803 int i;
2804 int status = 0;
2805 struct pci_dev *pdev = qdev->pdev;
2806 struct intr_context *intr_context = &qdev->intr_context[0];
2807
2808 ql_resolve_queues_to_irqs(qdev);
2809
2810 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2811 atomic_set(&intr_context->irq_cnt, 0);
2812 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2813 status = request_irq(qdev->msi_x_entry[i].vector,
2814 intr_context->handler,
2815 0,
2816 intr_context->name,
2817 &qdev->rx_ring[i]);
2818 if (status) {
2819 QPRINTK(qdev, IFUP, ERR,
2820 "Failed request for MSIX interrupt %d.\n",
2821 i);
2822 goto err_irq;
2823 } else {
2824 QPRINTK(qdev, IFUP, INFO,
2825 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2826 i,
2827 qdev->rx_ring[i].type ==
2828 DEFAULT_Q ? "DEFAULT_Q" : "",
2829 qdev->rx_ring[i].type ==
2830 TX_Q ? "TX_Q" : "",
2831 qdev->rx_ring[i].type ==
2832 RX_Q ? "RX_Q" : "", intr_context->name);
2833 }
2834 } else {
2835 QPRINTK(qdev, IFUP, DEBUG,
2836 "trying msi or legacy interrupts.\n");
2837 QPRINTK(qdev, IFUP, DEBUG,
2838 "%s: irq = %d.\n", __func__, pdev->irq);
2839 QPRINTK(qdev, IFUP, DEBUG,
2840 "%s: context->name = %s.\n", __func__,
2841 intr_context->name);
2842 QPRINTK(qdev, IFUP, DEBUG,
2843 "%s: dev_id = 0x%p.\n", __func__,
2844 &qdev->rx_ring[0]);
2845 status =
2846 request_irq(pdev->irq, qlge_isr,
2847 test_bit(QL_MSI_ENABLED,
2848 &qdev->
2849 flags) ? 0 : IRQF_SHARED,
2850 intr_context->name, &qdev->rx_ring[0]);
2851 if (status)
2852 goto err_irq;
2853
2854 QPRINTK(qdev, IFUP, ERR,
2855 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2856 i,
2857 qdev->rx_ring[0].type ==
2858 DEFAULT_Q ? "DEFAULT_Q" : "",
2859 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2860 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2861 intr_context->name);
2862 }
2863 intr_context->hooked = 1;
2864 }
2865 return status;
2866err_irq:
   2867	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
2868 ql_free_irq(qdev);
2869 return status;
2870}
2871
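/* Download the RSS initialization control block (ricb). Inbound
 * traffic is spread across the RSS rings via a 256-entry indirection
 * table and random hash keys.
 */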
2872static int ql_start_rss(struct ql_adapter *qdev)
2873{
2874 struct ricb *ricb = &qdev->ricb;
2875 int status = 0;
2876 int i;
2877 u8 *hash_id = (u8 *) ricb->hash_cq_id;
2878
   2879	memset((void *)ricb, 0, sizeof(*ricb));
2880
2881 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2882 ricb->flags =
2883 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2884 RSS_RT6);
2885 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2886
2887 /*
2888 * Fill out the Indirection Table.
2889 */
Ron Mercerdef48b62009-02-12 16:38:18 -08002890 for (i = 0; i < 256; i++)
2891 hash_id[i] = i & (qdev->rss_ring_count - 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002892
2893 /*
2894 * Random values for the IPv6 and IPv4 Hash Keys.
2895 */
2896 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2897 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2898
2899 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2900
   2901	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
2902 if (status) {
2903 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2904 return status;
2905 }
2906 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2907 return status;
2908}
2909
2910/* Initialize the frame-to-queue routing. */
2911static int ql_route_initialize(struct ql_adapter *qdev)
2912{
2913 int status = 0;
2914 int i;
2915
Ron Mercer8587ea32009-02-23 10:42:15 +00002916 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
2917 if (status)
2918 return status;
2919
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002920 /* Clear all the entries in the routing table. */
2921 for (i = 0; i < 16; i++) {
2922 status = ql_set_routing_reg(qdev, i, 0, 0);
2923 if (status) {
2924 QPRINTK(qdev, IFUP, ERR,
2925 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00002926 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002927 }
2928 }
2929
2930 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2931 if (status) {
2932 QPRINTK(qdev, IFUP, ERR,
2933 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00002934 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002935 }
2936 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2937 if (status) {
2938 QPRINTK(qdev, IFUP, ERR,
2939 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00002940 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002941 }
2942 /* If we have more than one inbound queue, then turn on RSS in the
2943 * routing block.
2944 */
2945 if (qdev->rss_ring_count > 1) {
2946 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2947 RT_IDX_RSS_MATCH, 1);
2948 if (status) {
2949 QPRINTK(qdev, IFUP, ERR,
2950 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00002951 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002952 }
2953 }
2954
2955 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2956 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00002957 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002958 QPRINTK(qdev, IFUP, ERR,
2959 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00002960exit:
2961 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002962 return status;
2963}
2964
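/* Program the station MAC address into the CAM and then set up the
 * frame routing table.
 */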
Ron Mercerbb58b5b2009-02-23 10:42:13 +00002965static int ql_cam_route_initialize(struct ql_adapter *qdev)
2966{
2967 int status;
2968
Ron Mercercc288f52009-02-23 10:42:14 +00002969 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2970 if (status)
2971 return status;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00002972 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
2973 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00002974 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00002975 if (status) {
2976 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
2977 return status;
2978 }
2979
2980 status = ql_route_initialize(qdev);
2981 if (status)
2982 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
2983
2984 return status;
2985}
2986
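/* Bring up the chip: program the global registers, load the rx/tx
 * control blocks, configure RSS and routing, and enable NAPI on the
 * RSS rings.
 */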
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002987static int ql_adapter_initialize(struct ql_adapter *qdev)
2988{
2989 u32 value, mask;
2990 int i;
2991 int status = 0;
2992
2993 /*
2994 * Set up the System register to halt on errors.
2995 */
2996 value = SYS_EFE | SYS_FAE;
2997 mask = value << 16;
2998 ql_write32(qdev, SYS, mask | value);
2999
3000 /* Set the default queue. */
3001 value = NIC_RCV_CFG_DFQ;
3002 mask = NIC_RCV_CFG_DFQ_MASK;
3003 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3004
3005 /* Set the MPI interrupt to enabled. */
3006 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3007
3008 /* Enable the function, set pagesize, enable error checking. */
3009 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3010 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3011
3012 /* Set/clear header splitting. */
3013 mask = FSC_VM_PAGESIZE_MASK |
3014 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3015 ql_write32(qdev, FSC, mask | value);
3016
3017 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3018 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3019
3020 /* Start up the rx queues. */
3021 for (i = 0; i < qdev->rx_ring_count; i++) {
3022 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3023 if (status) {
3024 QPRINTK(qdev, IFUP, ERR,
3025 "Failed to start rx ring[%d].\n", i);
3026 return status;
3027 }
3028 }
3029
3030 /* If there is more than one inbound completion queue
3031 * then download a RICB to configure RSS.
3032 */
3033 if (qdev->rss_ring_count > 1) {
3034 status = ql_start_rss(qdev);
3035 if (status) {
3036 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3037 return status;
3038 }
3039 }
3040
3041 /* Start up the tx queues. */
3042 for (i = 0; i < qdev->tx_ring_count; i++) {
3043 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3044 if (status) {
3045 QPRINTK(qdev, IFUP, ERR,
3046 "Failed to start tx ring[%d].\n", i);
3047 return status;
3048 }
3049 }
3050
3051 status = ql_port_initialize(qdev);
3052 if (status) {
3053 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3054 return status;
3055 }
3056
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003057 /* Set up the MAC address and frame routing filter. */
3058 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003059 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003060 QPRINTK(qdev, IFUP, ERR,
3061 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003062 return status;
3063 }
3064
3065 /* Start NAPI for the RSS queues. */
3066 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3067 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3068 i);
3069 napi_enable(&qdev->rx_ring[i].napi);
3070 }
3071
3072 return status;
3073}
3074
3075/* Issue soft reset to chip. */
3076static int ql_adapter_reset(struct ql_adapter *qdev)
3077{
3078 u32 value;
3079 int max_wait_time;
3080 int status = 0;
3081 int resetCnt = 0;
3082
3083#define MAX_RESET_CNT 1
3084issueReset:
3085 resetCnt++;
3086 QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3087 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3088 /* Wait for reset to complete. */
3089 max_wait_time = 3;
3090 QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3091 max_wait_time);
3092 do {
3093 value = ql_read32(qdev, RST_FO);
3094 if ((value & RST_FO_FR) == 0)
3095 break;
3096
3097 ssleep(1);
3098 } while ((--max_wait_time));
3099 if (value & RST_FO_FR) {
3100 QPRINTK(qdev, IFDOWN, ERR,
3101 "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
3102 if (resetCnt < MAX_RESET_CNT)
3103 goto issueReset;
3104 }
3105 if (max_wait_time == 0) {
3106 status = -ETIMEDOUT;
3107 QPRINTK(qdev, IFDOWN, ERR,
   3108			"ETIMEDOUT!!! errored out of resetting the chip!\n");
3109 }
3110
3111 return status;
3112}
3113
3114static void ql_display_dev_info(struct net_device *ndev)
3115{
3116 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3117
3118 QPRINTK(qdev, PROBE, INFO,
3119 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3120 "XG Roll = %d, XG Rev = %d.\n",
3121 qdev->func,
3122 qdev->chip_rev_id & 0x0000000f,
3123 qdev->chip_rev_id >> 4 & 0x0000000f,
3124 qdev->chip_rev_id >> 8 & 0x0000000f,
3125 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003126 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003127}
3128
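/* Take the adapter down: stop the stack queue, cancel the worker
 * threads, disable interrupts, reclaim pending tx buffers and reset
 * the chip.
 */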
3129static int ql_adapter_down(struct ql_adapter *qdev)
3130{
3131 struct net_device *ndev = qdev->ndev;
3132 int i, status = 0;
3133 struct rx_ring *rx_ring;
3134
3135 netif_stop_queue(ndev);
3136 netif_carrier_off(ndev);
3137
Ron Mercer6497b602009-02-12 16:37:13 -08003138 /* Don't kill the reset worker thread if we
3139 * are in the process of recovery.
3140 */
3141 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3142 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003143 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3144 cancel_delayed_work_sync(&qdev->mpi_work);
3145
3146 /* The default queue at index 0 is always processed in
3147 * a workqueue.
3148 */
3149 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3150
3151 /* The rest of the rx_rings are processed in
3152 * a workqueue only if it's a single interrupt
3153 * environment (MSI/Legacy).
3154 */
Roel Kluinc0620762008-12-25 17:23:50 -08003155 for (i = 1; i < qdev->rx_ring_count; i++) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003156 rx_ring = &qdev->rx_ring[i];
3157 /* Only the RSS rings use NAPI on multi irq
3158 * environment. Outbound completion processing
3159 * is done in interrupt context.
3160 */
3161 if (i >= qdev->rss_ring_first_cq_id) {
3162 napi_disable(&rx_ring->napi);
3163 } else {
3164 cancel_delayed_work_sync(&rx_ring->rx_work);
3165 }
3166 }
3167
3168 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3169
3170 ql_disable_interrupts(qdev);
3171
3172 ql_tx_ring_clean(qdev);
3173
3174 spin_lock(&qdev->hw_lock);
3175 status = ql_adapter_reset(qdev);
3176 if (status)
3177 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3178 qdev->func);
3179 spin_unlock(&qdev->hw_lock);
3180 return status;
3181}
3182
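/* Bring the adapter up and, if the port link is already established,
 * start the transmit queue.
 */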
3183static int ql_adapter_up(struct ql_adapter *qdev)
3184{
3185 int err = 0;
3186
3187 spin_lock(&qdev->hw_lock);
3188 err = ql_adapter_initialize(qdev);
3189 if (err) {
3190 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3191 spin_unlock(&qdev->hw_lock);
3192 goto err_init;
3193 }
3194 spin_unlock(&qdev->hw_lock);
3195 set_bit(QL_ADAPTER_UP, &qdev->flags);
3196 ql_enable_interrupts(qdev);
3197 ql_enable_all_completion_interrupts(qdev);
3198 if ((ql_read32(qdev, STS) & qdev->port_init)) {
3199 netif_carrier_on(qdev->ndev);
3200 netif_start_queue(qdev->ndev);
3201 }
3202
3203 return 0;
3204err_init:
3205 ql_adapter_reset(qdev);
3206 return err;
3207}
3208
3209static int ql_cycle_adapter(struct ql_adapter *qdev)
3210{
3211 int status;
3212
3213 status = ql_adapter_down(qdev);
3214 if (status)
3215 goto error;
3216
3217 status = ql_adapter_up(qdev);
3218 if (status)
3219 goto error;
3220
3221 return status;
3222error:
3223 QPRINTK(qdev, IFUP, ALERT,
3224 "Driver up/down cycle failed, closing device\n");
3225 rtnl_lock();
3226 dev_close(qdev->ndev);
3227 rtnl_unlock();
3228 return status;
3229}
3230
3231static void ql_release_adapter_resources(struct ql_adapter *qdev)
3232{
3233 ql_free_mem_resources(qdev);
3234 ql_free_irq(qdev);
3235}
3236
3237static int ql_get_adapter_resources(struct ql_adapter *qdev)
3238{
3239 int status = 0;
3240
3241 if (ql_alloc_mem_resources(qdev)) {
3242 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3243 return -ENOMEM;
3244 }
3245 status = ql_request_irq(qdev);
3246 if (status)
3247 goto err_irq;
3248 return status;
3249err_irq:
3250 ql_free_mem_resources(qdev);
3251 return status;
3252}
3253
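/* net_device close entry point. Waits out any reset in progress,
 * then tears the adapter down and releases its resources.
 */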
3254static int qlge_close(struct net_device *ndev)
3255{
3256 struct ql_adapter *qdev = netdev_priv(ndev);
3257
3258 /*
3259 * Wait for device to recover from a reset.
3260 * (Rarely happens, but possible.)
3261 */
3262 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3263 msleep(1);
3264 ql_adapter_down(qdev);
3265 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003266 return 0;
3267}
3268
3269static int ql_configure_rings(struct ql_adapter *qdev)
3270{
3271 int i;
3272 struct rx_ring *rx_ring;
3273 struct tx_ring *tx_ring;
3274 int cpu_cnt = num_online_cpus();
3275
3276 /*
3277 * For each processor present we allocate one
3278 * rx_ring for outbound completions, and one
3279 * rx_ring for inbound completions. Plus there is
3280 * always the one default queue. For the CPU
3281 * counts we end up with the following rx_rings:
3282 * rx_ring count =
3283 * one default queue +
3284 * (CPU count * outbound completion rx_ring) +
3285 * (CPU count * inbound (RSS) completion rx_ring)
3286 * To keep it simple we limit the total number of
3287 * queues to < 32, so we truncate CPU to 8.
3288 * This limitation can be removed when requested.
3289 */
3290
Ron Mercer683d46a2009-01-09 11:31:53 +00003291 if (cpu_cnt > MAX_CPUS)
3292 cpu_cnt = MAX_CPUS;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003293
3294 /*
3295 * rx_ring[0] is always the default queue.
3296 */
3297 /* Allocate outbound completion ring for each CPU. */
3298 qdev->tx_ring_count = cpu_cnt;
3299 /* Allocate inbound completion (RSS) ring for each CPU. */
3300 qdev->rss_ring_count = cpu_cnt;
3301 /* cq_id for the first inbound ring handler. */
3302 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3303 /*
3304 * qdev->rx_ring_count:
3305 * Total number of rx_rings. This includes the one
3306 * default queue, a number of outbound completion
3307 * handler rx_rings, and the number of inbound
3308 * completion handler rx_rings.
3309 */
3310 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3311
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003312 for (i = 0; i < qdev->tx_ring_count; i++) {
3313 tx_ring = &qdev->tx_ring[i];
   3314		memset((void *)tx_ring, 0, sizeof(*tx_ring));
3315 tx_ring->qdev = qdev;
3316 tx_ring->wq_id = i;
3317 tx_ring->wq_len = qdev->tx_ring_size;
3318 tx_ring->wq_size =
3319 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3320
3321 /*
3322 * The completion queue ID for the tx rings start
3323 * immediately after the default Q ID, which is zero.
3324 */
3325 tx_ring->cq_id = i + 1;
3326 }
3327
3328 for (i = 0; i < qdev->rx_ring_count; i++) {
3329 rx_ring = &qdev->rx_ring[i];
   3330		memset((void *)rx_ring, 0, sizeof(*rx_ring));
3331 rx_ring->qdev = qdev;
3332 rx_ring->cq_id = i;
3333 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3334 if (i == 0) { /* Default queue at index 0. */
3335 /*
3336 * Default queue handles bcast/mcast plus
3337 * async events. Needs buffers.
3338 */
3339 rx_ring->cq_len = qdev->rx_ring_size;
3340 rx_ring->cq_size =
3341 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3342 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3343 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003344 rx_ring->lbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003345 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3346 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3347 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003348 rx_ring->sbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003349 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3350 rx_ring->type = DEFAULT_Q;
3351 } else if (i < qdev->rss_ring_first_cq_id) {
3352 /*
3353 * Outbound queue handles outbound completions only.
3354 */
3355 /* outbound cq is same size as tx_ring it services. */
3356 rx_ring->cq_len = qdev->tx_ring_size;
3357 rx_ring->cq_size =
3358 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3359 rx_ring->lbq_len = 0;
3360 rx_ring->lbq_size = 0;
3361 rx_ring->lbq_buf_size = 0;
3362 rx_ring->sbq_len = 0;
3363 rx_ring->sbq_size = 0;
3364 rx_ring->sbq_buf_size = 0;
3365 rx_ring->type = TX_Q;
3366 } else { /* Inbound completions (RSS) queues */
3367 /*
3368 * Inbound queues handle unicast frames only.
3369 */
3370 rx_ring->cq_len = qdev->rx_ring_size;
3371 rx_ring->cq_size =
3372 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3373 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3374 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003375 rx_ring->lbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003376 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3377 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3378 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003379 rx_ring->sbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003380 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3381 rx_ring->type = RX_Q;
3382 }
3383 }
3384 return 0;
3385}
3386
3387static int qlge_open(struct net_device *ndev)
3388{
3389 int err = 0;
3390 struct ql_adapter *qdev = netdev_priv(ndev);
3391
3392 err = ql_configure_rings(qdev);
3393 if (err)
3394 return err;
3395
3396 err = ql_get_adapter_resources(qdev);
3397 if (err)
3398 goto error_up;
3399
3400 err = ql_adapter_up(qdev);
3401 if (err)
3402 goto error_up;
3403
3404 return err;
3405
3406error_up:
3407 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003408 return err;
3409}
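
/*
 * Note (added commentary): qlge_open() follows the usual bring-up order:
 * size the rings for the current CPU count, allocate the DMA/IRQ
 * resources, then start the adapter; on failure the acquired resources
 * are released via the error_up label before the error is returned.
 */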
3410
3411static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3412{
3413 struct ql_adapter *qdev = netdev_priv(ndev);
3414
3415 if (ndev->mtu == 1500 && new_mtu == 9000) {
3416 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3417 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3418 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3419 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3420 (ndev->mtu == 9000 && new_mtu == 9000)) {
3421 return 0;
3422 } else
3423 return -EINVAL;
3424 ndev->mtu = new_mtu;
3425 return 0;
3426}
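
/*
 * Usage note (added for illustration): only transitions between the two
 * MTU values tested above are accepted, so e.g.
 *
 *	ip link set dev <ifname> mtu 9000
 *
 * switches to jumbo frames, while any other value returns -EINVAL.
 */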
3427
3428static struct net_device_stats *qlge_get_stats(struct net_device
3429 *ndev)
3430{
3431 struct ql_adapter *qdev = netdev_priv(ndev);
3432 return &qdev->stats;
3433}
3434
3435static void qlge_set_multicast_list(struct net_device *ndev)
3436{
3437	struct ql_adapter *qdev = netdev_priv(ndev);
3438 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003439 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003440
Ron Mercercc288f52009-02-23 10:42:14 +00003441 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3442 if (status)
3443 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003444 spin_lock(&qdev->hw_lock);
3445 /*
3446 * Set or clear promiscuous mode if a
3447 * transition is taking place.
3448 */
3449 if (ndev->flags & IFF_PROMISC) {
3450 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3451 if (ql_set_routing_reg
3452 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3453 QPRINTK(qdev, HW, ERR,
3454					"Failed to set promiscuous mode.\n");
3455 } else {
3456 set_bit(QL_PROMISCUOUS, &qdev->flags);
3457 }
3458 }
3459 } else {
3460 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3461 if (ql_set_routing_reg
3462 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3463 QPRINTK(qdev, HW, ERR,
3464					"Failed to clear promiscuous mode.\n");
3465 } else {
3466 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3467 }
3468 }
3469 }
3470
3471 /*
3472 * Set or clear all multicast mode if a
3473 * transition is taking place.
3474 */
3475 if ((ndev->flags & IFF_ALLMULTI) ||
3476 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3477 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3478 if (ql_set_routing_reg
3479 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3480 QPRINTK(qdev, HW, ERR,
3481 "Failed to set all-multi mode.\n");
3482 } else {
3483 set_bit(QL_ALLMULTI, &qdev->flags);
3484 }
3485 }
3486 } else {
3487 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3488 if (ql_set_routing_reg
3489 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3490 QPRINTK(qdev, HW, ERR,
3491 "Failed to clear all-multi mode.\n");
3492 } else {
3493 clear_bit(QL_ALLMULTI, &qdev->flags);
3494 }
3495 }
3496 }
3497
3498 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003499 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3500 if (status)
3501 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003502 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3503 i++, mc_ptr = mc_ptr->next)
3504 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3505 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3506 QPRINTK(qdev, HW, ERR,
3507					"Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003508 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003509 goto exit;
3510 }
Ron Mercercc288f52009-02-23 10:42:14 +00003511 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003512 if (ql_set_routing_reg
3513 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3514 QPRINTK(qdev, HW, ERR,
3515 "Failed to set multicast match mode.\n");
3516 } else {
3517 set_bit(QL_ALLMULTI, &qdev->flags);
3518 }
3519 }
3520exit:
3521 spin_unlock(&qdev->hw_lock);
Ron Mercer8587ea32009-02-23 10:42:15 +00003522 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003523}
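
/*
 * Note (added commentary): the function above holds the RT_IDX hardware
 * semaphore and hw_lock for the whole update, and additionally takes the
 * MAC address semaphore while loading the multicast list into the CAM;
 * the remaining two locks are dropped in reverse order at the exit label.
 */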
3524
3525static int qlge_set_mac_address(struct net_device *ndev, void *p)
3526{
3527	struct ql_adapter *qdev = netdev_priv(ndev);
3528 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003529 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003530
3531 if (netif_running(ndev))
3532 return -EBUSY;
3533
3534 if (!is_valid_ether_addr(addr->sa_data))
3535 return -EADDRNOTAVAIL;
3536 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3537
Ron Mercercc288f52009-02-23 10:42:14 +00003538 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3539 if (status)
3540 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003541 spin_lock(&qdev->hw_lock);
Ron Mercercc288f52009-02-23 10:42:14 +00003542 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3543 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003544 spin_unlock(&qdev->hw_lock);
Ron Mercercc288f52009-02-23 10:42:14 +00003545 if (status)
3546 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3547 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3548 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549}
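
/*
 * Note (added commentary): the new unicast address is programmed into the
 * CAM at a per-function offset (qdev->func * MAX_CQ), under the MAC
 * address hardware semaphore and hw_lock, so each PCI function owns its
 * own CAM slot range.
 */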
3550
3551static void qlge_tx_timeout(struct net_device *ndev)
3552{
3553	struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08003554 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003555}
3556
3557static void ql_asic_reset_work(struct work_struct *work)
3558{
3559 struct ql_adapter *qdev =
3560 container_of(work, struct ql_adapter, asic_reset_work.work);
3561 ql_cycle_adapter(qdev);
3562}
3563
3564static void ql_get_board_info(struct ql_adapter *qdev)
3565{
3566 qdev->func =
3567 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3568 if (qdev->func) {
3569 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3570 qdev->port_link_up = STS_PL1;
3571 qdev->port_init = STS_PI1;
3572 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3573 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3574 } else {
3575 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3576 qdev->port_link_up = STS_PL0;
3577 qdev->port_init = STS_PI0;
3578 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3579 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3580 }
3581 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3582}
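
/*
 * Note (added commentary): ql_get_board_info() decodes the function ID
 * from the STS register and selects the per-port resources accordingly:
 * function 0 uses XGMAC0 and the PL0/PI0 status bits, any other function
 * uses XGMAC1 and PL1/PI1, each with its own MPI mailbox window.
 */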
3583
3584static void ql_release_all(struct pci_dev *pdev)
3585{
3586 struct net_device *ndev = pci_get_drvdata(pdev);
3587 struct ql_adapter *qdev = netdev_priv(ndev);
3588
3589 if (qdev->workqueue) {
3590 destroy_workqueue(qdev->workqueue);
3591 qdev->workqueue = NULL;
3592 }
3593 if (qdev->q_workqueue) {
3594 destroy_workqueue(qdev->q_workqueue);
3595 qdev->q_workqueue = NULL;
3596 }
3597 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003598 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003599 if (qdev->doorbell_area)
3600 iounmap(qdev->doorbell_area);
3601 pci_release_regions(pdev);
3602 pci_set_drvdata(pdev, NULL);
3603}
3604
3605static int __devinit ql_init_device(struct pci_dev *pdev,
3606 struct net_device *ndev, int cards_found)
3607{
3608 struct ql_adapter *qdev = netdev_priv(ndev);
3609 int pos, err = 0;
3610 u16 val16;
3611
3612	memset((void *)qdev, 0, sizeof(*qdev));
3613 err = pci_enable_device(pdev);
3614 if (err) {
3615 dev_err(&pdev->dev, "PCI device enable failed.\n");
3616 return err;
3617 }
3618
3619 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3620 if (pos <= 0) {
3621 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3622 "aborting.\n");
3623 goto err_out;
3624 } else {
3625 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3626 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3627 val16 |= (PCI_EXP_DEVCTL_CERE |
3628 PCI_EXP_DEVCTL_NFERE |
3629 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3630 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3631 }
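	/*
	 * Note (added commentary): the PCI Express DEVCTL update above
	 * clears the No Snoop enable bit and turns on correctable,
	 * non-fatal, fatal and unsupported-request error reporting for
	 * this function.
	 */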
3632
3633 err = pci_request_regions(pdev, DRV_NAME);
3634 if (err) {
3635 dev_err(&pdev->dev, "PCI region request failed.\n");
3636 goto err_out;
3637 }
3638
3639 pci_set_master(pdev);
3640 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3641 set_bit(QL_DMA64, &qdev->flags);
3642 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3643 } else {
3644 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3645 if (!err)
3646 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3647 }
3648
3649 if (err) {
3650 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3651 goto err_out;
3652 }
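	/*
	 * Note (added commentary): a 64-bit DMA mask is preferred above;
	 * on success QL_DMA64 is set, which later lets qlge_probe()
	 * advertise NETIF_F_HIGHDMA, otherwise the driver falls back to
	 * 32-bit DMA.
	 */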
3653
3654 pci_set_drvdata(pdev, ndev);
3655 qdev->reg_base =
3656 ioremap_nocache(pci_resource_start(pdev, 1),
3657 pci_resource_len(pdev, 1));
3658 if (!qdev->reg_base) {
3659 dev_err(&pdev->dev, "Register mapping failed.\n");
3660 err = -ENOMEM;
3661 goto err_out;
3662 }
3663
3664 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3665 qdev->doorbell_area =
3666 ioremap_nocache(pci_resource_start(pdev, 3),
3667 pci_resource_len(pdev, 3));
3668 if (!qdev->doorbell_area) {
3669 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3670 err = -ENOMEM;
3671 goto err_out;
3672 }
3673
3674 ql_get_board_info(qdev);
3675 qdev->ndev = ndev;
3676 qdev->pdev = pdev;
3677 qdev->msg_enable = netif_msg_init(debug, default_msg);
3678 spin_lock_init(&qdev->hw_lock);
3679 spin_lock_init(&qdev->stats_lock);
3680
3681 /* make sure the EEPROM is good */
3682 err = ql_get_flash_params(qdev);
3683 if (err) {
3684 dev_err(&pdev->dev, "Invalid FLASH.\n");
3685 goto err_out;
3686 }
3687
3688 if (!is_valid_ether_addr(qdev->flash.mac_addr))
3689 goto err_out;
3690
3691 memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3692 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3693
3694 /* Set up the default ring sizes. */
3695 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3696 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3697
3698 /* Set up the coalescing parameters. */
3699 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3700 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3701 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3702 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3703
3704 /*
3705 * Set up the operating parameters.
3706 */
3707 qdev->rx_csum = 1;
3708
3709 qdev->q_workqueue = create_workqueue(ndev->name);
3710 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3711 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3712 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3713 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3714
3715 if (!cards_found) {
3716 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3717 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3718 DRV_NAME, DRV_VERSION);
3719 }
3720 return 0;
3721err_out:
3722 ql_release_all(pdev);
3723 pci_disable_device(pdev);
3724 return err;
3725}
3726
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003727
3728static const struct net_device_ops qlge_netdev_ops = {
3729 .ndo_open = qlge_open,
3730 .ndo_stop = qlge_close,
3731 .ndo_start_xmit = qlge_send,
3732 .ndo_change_mtu = qlge_change_mtu,
3733 .ndo_get_stats = qlge_get_stats,
3734 .ndo_set_multicast_list = qlge_set_multicast_list,
3735 .ndo_set_mac_address = qlge_set_mac_address,
3736 .ndo_validate_addr = eth_validate_addr,
3737 .ndo_tx_timeout = qlge_tx_timeout,
3738 .ndo_vlan_rx_register = ql_vlan_rx_register,
3739 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3740 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3741};
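
/*
 * Note (added commentary): with struct net_device_ops the entry points
 * above are gathered into one table; qlge_probe() below simply points
 * ndev->netdev_ops at it instead of filling in individual function
 * pointers on the net_device.
 */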
3742
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003743static int __devinit qlge_probe(struct pci_dev *pdev,
3744 const struct pci_device_id *pci_entry)
3745{
3746 struct net_device *ndev = NULL;
3747 struct ql_adapter *qdev = NULL;
3748 static int cards_found = 0;
3749 int err = 0;
3750
3751 ndev = alloc_etherdev(sizeof(struct ql_adapter));
3752 if (!ndev)
3753 return -ENOMEM;
3754
3755 err = ql_init_device(pdev, ndev, cards_found);
3756 if (err < 0) {
3757 free_netdev(ndev);
3758 return err;
3759 }
3760
3761 qdev = netdev_priv(ndev);
3762 SET_NETDEV_DEV(ndev, &pdev->dev);
3763 ndev->features = (0
3764 | NETIF_F_IP_CSUM
3765 | NETIF_F_SG
3766 | NETIF_F_TSO
3767 | NETIF_F_TSO6
3768 | NETIF_F_TSO_ECN
3769 | NETIF_F_HW_VLAN_TX
3770 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3771
3772 if (test_bit(QL_DMA64, &qdev->flags))
3773 ndev->features |= NETIF_F_HIGHDMA;
3774
3775 /*
3776 * Set up net_device structure.
3777 */
3778 ndev->tx_queue_len = qdev->tx_ring_size;
3779 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003780
3781 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003782 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003783 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003784
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003785 err = register_netdev(ndev);
3786 if (err) {
3787 dev_err(&pdev->dev, "net device registration failed.\n");
3788 ql_release_all(pdev);
3789 pci_disable_device(pdev);
3790 return err;
3791 }
3792 netif_carrier_off(ndev);
3793 netif_stop_queue(ndev);
3794 ql_display_dev_info(ndev);
3795 cards_found++;
3796 return 0;
3797}
3798
3799static void __devexit qlge_remove(struct pci_dev *pdev)
3800{
3801 struct net_device *ndev = pci_get_drvdata(pdev);
3802 unregister_netdev(ndev);
3803 ql_release_all(pdev);
3804 pci_disable_device(pdev);
3805 free_netdev(ndev);
3806}
3807
3808/*
3809 * This callback is called by the PCI subsystem whenever
3810 * a PCI bus error is detected.
3811 */
3812static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3813 enum pci_channel_state state)
3814{
3815 struct net_device *ndev = pci_get_drvdata(pdev);
3816 struct ql_adapter *qdev = netdev_priv(ndev);
3817
3818 if (netif_running(ndev))
3819 ql_adapter_down(qdev);
3820
3821 pci_disable_device(pdev);
3822
3823 /* Request a slot reset. */
3824 return PCI_ERS_RESULT_NEED_RESET;
3825}
3826
3827/*
3828 * This callback is called after the PCI bus has been reset.
3829 * Basically, this tries to restart the card from scratch.
3830 * This is a shortened version of the device probe/discovery code,
3831 * it resembles the first half of the qlge_probe() routine.
3832 */
3833static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3834{
3835 struct net_device *ndev = pci_get_drvdata(pdev);
3836 struct ql_adapter *qdev = netdev_priv(ndev);
3837
3838 if (pci_enable_device(pdev)) {
3839 QPRINTK(qdev, IFUP, ERR,
3840 "Cannot re-enable PCI device after reset.\n");
3841 return PCI_ERS_RESULT_DISCONNECT;
3842 }
3843
3844 pci_set_master(pdev);
3845
3846 netif_carrier_off(ndev);
3847 netif_stop_queue(ndev);
3848 ql_adapter_reset(qdev);
3849
3850 /* Make sure the EEPROM is good */
3851 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3852
3853 if (!is_valid_ether_addr(ndev->perm_addr)) {
3854 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
3855 return PCI_ERS_RESULT_DISCONNECT;
3856 }
3857
3858 return PCI_ERS_RESULT_RECOVERED;
3859}
3860
3861static void qlge_io_resume(struct pci_dev *pdev)
3862{
3863 struct net_device *ndev = pci_get_drvdata(pdev);
3864 struct ql_adapter *qdev = netdev_priv(ndev);
3865
3866 pci_set_master(pdev);
3867
3868 if (netif_running(ndev)) {
3869 if (ql_adapter_up(qdev)) {
3870 QPRINTK(qdev, IFUP, ERR,
3871 "Device initialization failed after reset.\n");
3872 return;
3873 }
3874 }
3875
3876 netif_device_attach(ndev);
3877}
3878
3879static struct pci_error_handlers qlge_err_handler = {
3880 .error_detected = qlge_io_error_detected,
3881 .slot_reset = qlge_io_slot_reset,
3882 .resume = qlge_io_resume,
3883};
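
/*
 * Note (added commentary): these callbacks implement the standard PCI
 * error-recovery sequence: error_detected() quiesces the device and asks
 * for a slot reset, slot_reset() re-enables and resets the adapter, and
 * resume() restarts the interface once the bus is usable again.
 */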
3884
3885static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3886{
3887 struct net_device *ndev = pci_get_drvdata(pdev);
3888 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer0047e5d2009-02-02 13:54:31 -08003889 int err, i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003890
3891 netif_device_detach(ndev);
3892
3893 if (netif_running(ndev)) {
3894 err = ql_adapter_down(qdev);
3895		if (err)
3896			return err;
3897 }
3898
Ron Mercer0047e5d2009-02-02 13:54:31 -08003899 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3900 netif_napi_del(&qdev->rx_ring[i].napi);
3901
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003902 err = pci_save_state(pdev);
3903 if (err)
3904 return err;
3905
3906 pci_disable_device(pdev);
3907
3908 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3909
3910 return 0;
3911}
3912
David S. Miller04da2cf2008-09-19 16:14:24 -07003913#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003914static int qlge_resume(struct pci_dev *pdev)
3915{
3916 struct net_device *ndev = pci_get_drvdata(pdev);
3917 struct ql_adapter *qdev = netdev_priv(ndev);
3918 int err;
3919
3920 pci_set_power_state(pdev, PCI_D0);
3921 pci_restore_state(pdev);
3922 err = pci_enable_device(pdev);
3923 if (err) {
3924 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3925 return err;
3926 }
3927 pci_set_master(pdev);
3928
3929 pci_enable_wake(pdev, PCI_D3hot, 0);
3930 pci_enable_wake(pdev, PCI_D3cold, 0);
3931
3932 if (netif_running(ndev)) {
3933 err = ql_adapter_up(qdev);
3934 if (err)
3935 return err;
3936 }
3937
3938 netif_device_attach(ndev);
3939
3940 return 0;
3941}
David S. Miller04da2cf2008-09-19 16:14:24 -07003942#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003943
3944static void qlge_shutdown(struct pci_dev *pdev)
3945{
3946 qlge_suspend(pdev, PMSG_SUSPEND);
3947}
3948
3949static struct pci_driver qlge_driver = {
3950 .name = DRV_NAME,
3951 .id_table = qlge_pci_tbl,
3952 .probe = qlge_probe,
3953 .remove = __devexit_p(qlge_remove),
3954#ifdef CONFIG_PM
3955 .suspend = qlge_suspend,
3956 .resume = qlge_resume,
3957#endif
3958 .shutdown = qlge_shutdown,
3959 .err_handler = &qlge_err_handler
3960};
3961
3962static int __init qlge_init_module(void)
3963{
3964 return pci_register_driver(&qlge_driver);
3965}
3966
3967static void __exit qlge_exit(void)
3968{
3969 pci_unregister_driver(&qlge_driver);
3970}
3971
3972module_init(qlge_init_module);
3973module_exit(qlge_exit);