/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/*	NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*	NETIF_MSG_TX_QUEUED | */
/*	NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/*	NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
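
/* Typical locking pattern around a shared hardware resource
 * (an illustrative sketch only; see the flash and MAC-address
 * helpers below for real call sites):
 *
 *	status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
 *	if (status)
 *		return status;
 *	...access the flash...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */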

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the DMA mapping if we can't get the semaphore. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
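
/* A hedged usage sketch: downloading a completion-queue control
 * block looks roughly like the call below (assumes a populated
 * struct cqicb and its queue id, as done by the ring-start code
 * elsewhere in this driver):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */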

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
					   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
			addr[0], addr[1], addr[2], addr[3],
			addr[4], addr[5]);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
			"address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
		qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
		qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL;	/* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
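
/* Note the write convention used just above: the upper 16 bits of the
 * INTR_EN write appear to act as a mask selecting which of the lower
 * 16 bits take effect, so writing mask+bit sets the enable and writing
 * the mask alone clears it.  (This is an inference from these two
 * helpers and similar masked writes elsewhere in this file, not a
 * statement from the hardware manual.)
 */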

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
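
/* The irq_cnt scheme pairs the two helpers above: a worker (or the
 * legacy/MSI interrupt path) brackets its work roughly as
 *
 *	ql_disable_completion_interrupt(qdev, intr);	// irq_cnt++
 *	...service the completion queue...
 *	ql_enable_completion_interrupt(qdev, intr);	// irq_cnt--, enable at 0
 *
 * (an illustrative sketch of the pattern, not a verbatim call site).
 */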

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
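
/* The flash image is evidently laid out so that all of its 16-bit
 * words sum to zero modulo 2^16; any nonzero residue therefore
 * indicates a corrupt image (inferred from the check above, whose
 * callers treat a nonzero return as an invalid flash).
 */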

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
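
/* Completion queues follow a producer/consumer protocol: the chip
 * advances a producer index in a shadow register (see
 * ql_read_sh_reg(rx_ring->prod_idx_sh_reg) in the clean routines
 * below), while the driver advances cnsmr_idx with ql_update_cq()
 * and publishes it back to the chip via the doorbell write above.
 */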

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					rx_ring->lbq_clean_idx = clean_idx;
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					rx_ring->lbq_clean_idx = clean_idx;
					put_page(lbq_desc->p.lbq_page);
					lbq_desc->p.lbq_page = NULL;
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
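	/* (NET_IP_ALIGN is 2 on most architectures, so this 2-byte
	 * offset leaves the IP header that follows the 14-byte
	 * Ethernet header aligned on a 4-byte boundary.) */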
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		__le64 *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (__le64 *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK);

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
			ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"UDP checksum done!\n");
			}
		}
	}

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb_record_rx_queue(skb,
		rx_ring->cq_id - qdev->rss_ring_first_cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}
1671
1672/* Process an outbound completion from an rx ring. */
1673static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1674 struct ob_mac_iocb_rsp *mac_rsp)
1675{
1676 struct tx_ring *tx_ring;
1677 struct tx_ring_desc *tx_ring_desc;
1678
1679 QL_DUMP_OB_MAC_RSP(mac_rsp);
1680 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1681 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1682 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer13cfd5b2009-07-02 06:06:10 +00001683 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001684 qdev->stats.tx_packets++;
1685 dev_kfree_skb(tx_ring_desc->skb);
1686 tx_ring_desc->skb = NULL;
1687
1688 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1689 OB_MAC_IOCB_RSP_S |
1690 OB_MAC_IOCB_RSP_L |
1691 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1692 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1693 QPRINTK(qdev, TX_DONE, WARNING,
1694 "Total descriptor length did not match transfer length.\n");
1695 }
1696 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1697 QPRINTK(qdev, TX_DONE, WARNING,
1698 "Frame too short to be legal, not sent.\n");
1699 }
1700 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1701 QPRINTK(qdev, TX_DONE, WARNING,
1702 "Frame too long, but sent anyway.\n");
1703 }
1704 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1705 QPRINTK(qdev, TX_DONE, WARNING,
1706 "PCI backplane error. Frame not sent.\n");
1707 }
1708 }
1709 atomic_inc(&tx_ring->tx_count);
1710}
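
/*
 * Editor's sketch: the per-bit warnings in ql_process_mac_tx_intr()
 * could be table-driven.  The array and strings below are purely
 * illustrative, not part of the driver.
 */
static const struct {
	u8 bit;
	const char *msg;
} qlge_tx_err_tbl[] = {
	{ OB_MAC_IOCB_RSP_E, "descriptor length did not match transfer length" },
	{ OB_MAC_IOCB_RSP_S, "frame too short to be legal, not sent" },
	{ OB_MAC_IOCB_RSP_L, "frame too long, but sent anyway" },
	{ OB_MAC_IOCB_RSP_B, "PCI backplane error, frame not sent" },
};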
1711
1712/* Fire up a handler to reset the MPI processor. */
1713void ql_queue_fw_error(struct ql_adapter *qdev)
1714{
Ron Mercer6a473302009-07-02 06:06:12 +00001715 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001716 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1717}
1718
1719void ql_queue_asic_error(struct ql_adapter *qdev)
1720{
Ron Mercer6a473302009-07-02 06:06:12 +00001721 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001722 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08001723 /* Clear adapter up bit to signal the recovery
1724 * process that it shouldn't kill the reset worker
1725 * thread
1726 */
1727 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001728 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1729}
1730
1731static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1732 struct ib_ae_iocb_rsp *ib_ae_rsp)
1733{
1734 switch (ib_ae_rsp->event) {
1735 case MGMT_ERR_EVENT:
1736 QPRINTK(qdev, RX_ERR, ERR,
1737 "Management Processor Fatal Error.\n");
1738 ql_queue_fw_error(qdev);
1739 return;
1740
1741 case CAM_LOOKUP_ERR_EVENT:
1742 QPRINTK(qdev, LINK, ERR,
1743 "Multiple CAM hits lookup occurred.\n");
1744 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1745 ql_queue_asic_error(qdev);
1746 return;
1747
1748 case SOFT_ECC_ERROR_EVENT:
1749 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1750 ql_queue_asic_error(qdev);
1751 break;
1752
1753 case PCI_ERR_ANON_BUF_RD:
1754 QPRINTK(qdev, RX_ERR, ERR,
1755 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1756 ib_ae_rsp->q_id);
1757 ql_queue_asic_error(qdev);
1758 break;
1759
1760 default:
1761 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1762 ib_ae_rsp->event);
1763 ql_queue_asic_error(qdev);
1764 break;
1765 }
1766}
1767
1768static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1769{
1770 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001771 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001772 struct ob_mac_iocb_rsp *net_rsp = NULL;
1773 int count = 0;
1774
Ron Mercer1e213302009-03-09 10:59:21 +00001775 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001776 /* While there are entries in the completion queue. */
1777 while (prod != rx_ring->cnsmr_idx) {
1778
1779 QPRINTK(qdev, RX_STATUS, DEBUG,
1780 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1781 prod, rx_ring->cnsmr_idx);
1782
1783 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1784 rmb();
1785 switch (net_rsp->opcode) {
1786
1787 case OPCODE_OB_MAC_TSO_IOCB:
1788 case OPCODE_OB_MAC_IOCB:
1789 ql_process_mac_tx_intr(qdev, net_rsp);
1790 break;
1791 default:
1792 QPRINTK(qdev, RX_STATUS, DEBUG,
1793 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1794 net_rsp->opcode);
1795 }
1796 count++;
1797 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001798 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001799 }
1800 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00001801 if (net_rsp == NULL)
1802 return count;
1803 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1804 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001804 if (atomic_read(&tx_ring->queue_stopped) &&
1805 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1806 /*
1807 * The queue got stopped because the tx_ring was full.
1808 * Wake it up, because it's now at least 25% empty.
1809 */
Ron Mercer1e213302009-03-09 10:59:21 +00001810 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001811 }
1812
1813 return count;
1814}
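
/*
 * Editor's sketch: the wake test above in one place.  A stopped tx
 * queue is restarted only once at least a quarter of the ring is
 * free, which keeps the queue from flapping on every completion.
 * The helper name is hypothetical.
 */
static inline bool qlge_tx_ring_wakeable(struct tx_ring *tx_ring)
{
	return atomic_read(&tx_ring->queue_stopped) &&
	       atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4);
}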
1815
1816static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1817{
1818 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001819 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001820 struct ql_net_rsp_iocb *net_rsp;
1821 int count = 0;
1822
1823 /* While there are entries in the completion queue. */
1824 while (prod != rx_ring->cnsmr_idx) {
1825
1826 QPRINTK(qdev, RX_STATUS, DEBUG,
1827 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1828 prod, rx_ring->cnsmr_idx);
1829
1830 net_rsp = rx_ring->curr_entry;
1831 rmb();
1832 switch (net_rsp->opcode) {
1833 case OPCODE_IB_MAC_IOCB:
1834 ql_process_mac_rx_intr(qdev, rx_ring,
1835 (struct ib_mac_iocb_rsp *)
1836 net_rsp);
1837 break;
1838
1839 case OPCODE_IB_AE_IOCB:
1840 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1841 net_rsp);
1842 break;
1843 default:
1844 {
1845 QPRINTK(qdev, RX_STATUS, DEBUG,
1846 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1847 net_rsp->opcode);
1848 }
1849 }
1850 count++;
1851 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001852 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001853 if (count == budget)
1854 break;
1855 }
1856 ql_update_buffer_queues(qdev, rx_ring);
1857 ql_write_cq_idx(rx_ring);
1858 return count;
1859}
1860
1861static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1862{
1863 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1864 struct ql_adapter *qdev = rx_ring->qdev;
1865 int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1866
1867 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1868 rx_ring->cq_id);
1869
1870 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001871 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001872 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1873 }
1874 return work_done;
1875}
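
/*
 * Editor's note: ql_napi_poll_msix() follows the standard NAPI
 * contract, restated below with hypothetical names.  Consume up to
 * 'budget' completions; only when fewer were found may the poll be
 * completed and the device interrupt re-armed, otherwise the core
 * calls the poll function again.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	/* Consume up to 'budget' completions from the ring here. */
	int work_done = 0;

	if (work_done < budget) {
		/* Ring drained: leave polled mode, re-arm the IRQ. */
		napi_complete(napi);
	}
	return work_done;
}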
1876
1877static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1878{
1879 struct ql_adapter *qdev = netdev_priv(ndev);
1880
1881 qdev->vlgrp = grp;
1882 if (grp) {
1883 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1884 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1885 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1886 } else {
1887 QPRINTK(qdev, IFUP, DEBUG,
1888 "Turning off VLAN in NIC_RCV_CFG.\n");
1889 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1890 }
1891}
1892
1893static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1894{
1895 struct ql_adapter *qdev = netdev_priv(ndev);
1896 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00001897 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001898
Ron Mercercc288f52009-02-23 10:42:14 +00001899 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1900 if (status)
1901 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001902 spin_lock(&qdev->hw_lock);
1903 if (ql_set_mac_addr_reg
1904 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1905 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1906 }
1907 spin_unlock(&qdev->hw_lock);
Ron Mercercc288f52009-02-23 10:42:14 +00001908 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001909}
1910
1911static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1912{
1913 struct ql_adapter *qdev = netdev_priv(ndev);
1914 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00001915 int status;
1916
1917 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1918 if (status)
1919 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001920
1921 spin_lock(&qdev->hw_lock);
1922 if (ql_set_mac_addr_reg
1923 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1924 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1925 }
1926 spin_unlock(&qdev->hw_lock);
Ron Mercercc288f52009-02-23 10:42:14 +00001927 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001928
1929}
1930
1931/* Worker thread to process a given rx_ring that is dedicated
1932 * to outbound completions.
1933 */
1934static void ql_tx_clean(struct work_struct *work)
1935{
1936 struct rx_ring *rx_ring =
1937 container_of(work, struct rx_ring, rx_work.work);
1938 ql_clean_outbound_rx_ring(rx_ring);
1939 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1940
1941}
1942
1943/* Worker thread to process a given rx_ring that is dedicated
1944 * to inbound completions.
1945 */
1946static void ql_rx_clean(struct work_struct *work)
1947{
1948 struct rx_ring *rx_ring =
1949 container_of(work, struct rx_ring, rx_work.work);
1950 ql_clean_inbound_rx_ring(rx_ring, 64);
1951 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1952}
1953
1954/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1955static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1956{
1957 struct rx_ring *rx_ring = dev_id;
1958 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1959 &rx_ring->rx_work, 0);
1960 return IRQ_HANDLED;
1961}
1962
1963/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1964static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1965{
1966 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08001967 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001968 return IRQ_HANDLED;
1969}
1970
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001971/* This handles a fatal error, MPI activity, and the default
1972 * rx_ring in an MSI-X multiple vector environment.
1973 * In an MSI/Legacy environment it also processes the rest of
1974 * the rx_rings.
1975 */
1976static irqreturn_t qlge_isr(int irq, void *dev_id)
1977{
1978 struct rx_ring *rx_ring = dev_id;
1979 struct ql_adapter *qdev = rx_ring->qdev;
1980 struct intr_context *intr_context = &qdev->intr_context[0];
1981 u32 var;
1982 int i;
1983 int work_done = 0;
1984
Ron Mercerbb0d2152008-10-20 10:30:26 -07001985 spin_lock(&qdev->hw_lock);
1986 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1987 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1988 spin_unlock(&qdev->hw_lock);
1989 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001990 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07001991 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001992
Ron Mercerbb0d2152008-10-20 10:30:26 -07001993 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001994
1995 /*
1996 * Check for fatal error.
1997 */
1998 if (var & STS_FE) {
1999 ql_queue_asic_error(qdev);
2000 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2001 var = ql_read32(qdev, ERR_STS);
2002 QPRINTK(qdev, INTR, ERR,
2003 "Resetting chip. Error Status Register = 0x%x\n", var);
2004 return IRQ_HANDLED;
2005 }
2006
2007 /*
2008 * Check MPI processor activity.
2009 */
2010 if (var & STS_PI) {
2011 /*
2012 * We've got an async event or mailbox completion.
2013 * Handle it and clear the source of the interrupt.
2014 */
2015 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2016 ql_disable_completion_interrupt(qdev, intr_context->intr);
2017 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
2018 &qdev->mpi_work, 0);
2019 work_done++;
2020 }
2021
2022 /*
2023 * Check the default queue and wake handler if active.
2024 */
2025 rx_ring = &qdev->rx_ring[0];
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002026 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002027 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
2028 ql_disable_completion_interrupt(qdev, intr_context->intr);
2029 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
2030 &rx_ring->rx_work, 0);
2031 work_done++;
2032 }
2033
2034 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2035 /*
2036 * Start the DPC for each active queue.
2037 */
2038 for (i = 1; i < qdev->rx_ring_count; i++) {
2039 rx_ring = &qdev->rx_ring[i];
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002040 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002041 rx_ring->cnsmr_idx) {
2042 QPRINTK(qdev, INTR, INFO,
2043 "Waking handler for rx_ring[%d].\n", i);
2044 ql_disable_completion_interrupt(qdev,
2045 intr_context->
2046 intr);
2047 if (i < qdev->rss_ring_first_cq_id)
2048 queue_delayed_work_on(rx_ring->cpu,
2049 qdev->q_workqueue,
2050 &rx_ring->rx_work,
2051 0);
2052 else
Ben Hutchings288379f2009-01-19 16:43:59 -08002053 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002054 work_done++;
2055 }
2056 }
2057 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002058 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002059 return work_done ? IRQ_HANDLED : IRQ_NONE;
2060}
2061
2062static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2063{
2064
2065 if (skb_is_gso(skb)) {
2066 int err;
2067 if (skb_header_cloned(skb)) {
2068 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2069 if (err)
2070 return err;
2071 }
2072
2073 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2074 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2075 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2076 mac_iocb_ptr->total_hdrs_len =
2077 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2078 mac_iocb_ptr->net_trans_offset =
2079 cpu_to_le16(skb_network_offset(skb) |
2080 skb_transport_offset(skb)
2081 << OB_MAC_TRANSPORT_HDR_SHIFT);
2082 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2083 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2084 if (likely(skb->protocol == htons(ETH_P_IP))) {
2085 struct iphdr *iph = ip_hdr(skb);
2086 iph->check = 0;
2087 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2088 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2089 iph->daddr, 0,
2090 IPPROTO_TCP,
2091 0);
2092 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2093 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2094 tcp_hdr(skb)->check =
2095 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2096 &ipv6_hdr(skb)->daddr,
2097 0, IPPROTO_TCP, 0);
2098 }
2099 return 1;
2100 }
2101 return 0;
2102}
2103
2104static void ql_hw_csum_setup(struct sk_buff *skb,
2105 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2106{
2107 int len;
2108 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002109 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002110 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2111 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2112 mac_iocb_ptr->net_trans_offset =
2113 cpu_to_le16(skb_network_offset(skb) |
2114 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2115
2116 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2117 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2118 if (likely(iph->protocol == IPPROTO_TCP)) {
2119 check = &(tcp_hdr(skb)->check);
2120 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2121 mac_iocb_ptr->total_hdrs_len =
2122 cpu_to_le16(skb_transport_offset(skb) +
2123 (tcp_hdr(skb)->doff << 2));
2124 } else {
2125 check = &(udp_hdr(skb)->check);
2126 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2127 mac_iocb_ptr->total_hdrs_len =
2128 cpu_to_le16(skb_transport_offset(skb) +
2129 sizeof(struct udphdr));
2130 }
2131 *check = ~csum_tcpudp_magic(iph->saddr,
2132 iph->daddr, len, iph->protocol, 0);
2133}
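
/*
 * Editor's sketch of the checksum hand-off used by ql_tso() and
 * ql_hw_csum_setup(): software seeds the L4 checksum field with the
 * inverted pseudo-header sum, and the chip adds the one's-complement
 * sum of the L4 segment to finish the job.  The helper name is
 * hypothetical.
 */
static inline void qlge_seed_l4_csum(struct sk_buff *skb, __sum16 *check,
				     int l4len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Pseudo-header covers the addresses, protocol and L4 length. */
	*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
				    l4len, iph->protocol, 0);
}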
2134
2135static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
2136{
2137 struct tx_ring_desc *tx_ring_desc;
2138 struct ob_mac_iocb_req *mac_iocb_ptr;
2139 struct ql_adapter *qdev = netdev_priv(ndev);
2140 int tso;
2141 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002142 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002143
2144 tx_ring = &qdev->tx_ring[tx_ring_idx];
2145
Ron Mercer74c50b42009-03-09 10:59:27 +00002146 if (skb_padto(skb, ETH_ZLEN))
2147 return NETDEV_TX_OK;
2148
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002149 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2150 QPRINTK(qdev, TX_QUEUED, INFO,
2151 "%s: shutting down tx queue %d du to lack of resources.\n",
2152 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002153 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002154 atomic_inc(&tx_ring->queue_stopped);
2155 return NETDEV_TX_BUSY;
2156 }
2157 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2158 mac_iocb_ptr = tx_ring_desc->queue_entry;
2159 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002160
2161 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2162 mac_iocb_ptr->tid = tx_ring_desc->index;
2163 /* We use the upper 32-bits to store the tx queue for this IO.
2164 * When we get the completion we can use it to establish the context.
2165 */
2166 mac_iocb_ptr->txq_idx = tx_ring_idx;
2167 tx_ring_desc->skb = skb;
2168
2169 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2170
2171 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2172 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2173 vlan_tx_tag_get(skb));
2174 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2175 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2176 }
2177 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2178 if (tso < 0) {
2179 dev_kfree_skb_any(skb);
2180 return NETDEV_TX_OK;
2181 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2182 ql_hw_csum_setup(skb,
2183 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2184 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002185 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2186 NETDEV_TX_OK) {
2187 QPRINTK(qdev, TX_QUEUED, ERR,
2188 "Could not map the segments.\n");
2189 return NETDEV_TX_BUSY;
2190 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002191 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2192 tx_ring->prod_idx++;
2193 if (tx_ring->prod_idx == tx_ring->wq_len)
2194 tx_ring->prod_idx = 0;
2195 wmb();
2196
2197 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002198 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2199 tx_ring->prod_idx, skb->len);
2200
2201 atomic_dec(&tx_ring->tx_count);
2202 return NETDEV_TX_OK;
2203}
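
/*
 * Editor's sketch of the transmit doorbell idiom in qlge_send():
 * advance the producer index with wrap-around, then order the IOCB
 * writes with wmb() before letting hardware see the new index.  The
 * helper name is hypothetical.
 */
static inline void qlge_tx_ring_doorbell(struct tx_ring *tx_ring)
{
	if (++tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();	/* descriptors must be visible before the doorbell */
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}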
2204
2205static void ql_free_shadow_space(struct ql_adapter *qdev)
2206{
2207 if (qdev->rx_ring_shadow_reg_area) {
2208 pci_free_consistent(qdev->pdev,
2209 PAGE_SIZE,
2210 qdev->rx_ring_shadow_reg_area,
2211 qdev->rx_ring_shadow_reg_dma);
2212 qdev->rx_ring_shadow_reg_area = NULL;
2213 }
2214 if (qdev->tx_ring_shadow_reg_area) {
2215 pci_free_consistent(qdev->pdev,
2216 PAGE_SIZE,
2217 qdev->tx_ring_shadow_reg_area,
2218 qdev->tx_ring_shadow_reg_dma);
2219 qdev->tx_ring_shadow_reg_area = NULL;
2220 }
2221}
2222
2223static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2224{
2225 qdev->rx_ring_shadow_reg_area =
2226 pci_alloc_consistent(qdev->pdev,
2227 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2228 if (qdev->rx_ring_shadow_reg_area == NULL) {
2229 QPRINTK(qdev, IFUP, ERR,
2230 "Allocation of RX shadow space failed.\n");
2231 return -ENOMEM;
2232 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002233 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002234 qdev->tx_ring_shadow_reg_area =
2235 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2236 &qdev->tx_ring_shadow_reg_dma);
2237 if (qdev->tx_ring_shadow_reg_area == NULL) {
2238 QPRINTK(qdev, IFUP, ERR,
2239 "Allocation of TX shadow space failed.\n");
2240 goto err_wqp_sh_area;
2241 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002242 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002243 return 0;
2244
2245err_wqp_sh_area:
2246 pci_free_consistent(qdev->pdev,
2247 PAGE_SIZE,
2248 qdev->rx_ring_shadow_reg_area,
2249 qdev->rx_ring_shadow_reg_dma);
2250 return -ENOMEM;
2251}
2252
2253static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2254{
2255 struct tx_ring_desc *tx_ring_desc;
2256 int i;
2257 struct ob_mac_iocb_req *mac_iocb_ptr;
2258
2259 mac_iocb_ptr = tx_ring->wq_base;
2260 tx_ring_desc = tx_ring->q;
2261 for (i = 0; i < tx_ring->wq_len; i++) {
2262 tx_ring_desc->index = i;
2263 tx_ring_desc->skb = NULL;
2264 tx_ring_desc->queue_entry = mac_iocb_ptr;
2265 mac_iocb_ptr++;
2266 tx_ring_desc++;
2267 }
2268 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2269 atomic_set(&tx_ring->queue_stopped, 0);
2270}
2271
2272static void ql_free_tx_resources(struct ql_adapter *qdev,
2273 struct tx_ring *tx_ring)
2274{
2275 if (tx_ring->wq_base) {
2276 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2277 tx_ring->wq_base, tx_ring->wq_base_dma);
2278 tx_ring->wq_base = NULL;
2279 }
2280 kfree(tx_ring->q);
2281 tx_ring->q = NULL;
2282}
2283
2284static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2285 struct tx_ring *tx_ring)
2286{
2287 tx_ring->wq_base =
2288 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2289 &tx_ring->wq_base_dma);
2290
2291 if ((tx_ring->wq_base == NULL)
Ron Mercer88c55e32009-06-10 15:49:33 +00002292 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002293 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2294 return -ENOMEM;
2295 }
2296 tx_ring->q =
2297 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2298 if (tx_ring->q == NULL)
2299 goto err;
2300
2301 return 0;
2302err:
2303 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2304 tx_ring->wq_base, tx_ring->wq_base_dma);
2305 return -ENOMEM;
2306}
2307
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002308static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002309{
2310 int i;
2311 struct bq_desc *lbq_desc;
2312
2313 for (i = 0; i < rx_ring->lbq_len; i++) {
2314 lbq_desc = &rx_ring->lbq[i];
2315 if (lbq_desc->p.lbq_page) {
2316 pci_unmap_page(qdev->pdev,
2317 pci_unmap_addr(lbq_desc, mapaddr),
2318 pci_unmap_len(lbq_desc, maplen),
2319 PCI_DMA_FROMDEVICE);
2320
2321 put_page(lbq_desc->p.lbq_page);
2322 lbq_desc->p.lbq_page = NULL;
2323 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002324 }
2325}
2326
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002327static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328{
2329 int i;
2330 struct bq_desc *sbq_desc;
2331
2332 for (i = 0; i < rx_ring->sbq_len; i++) {
2333 sbq_desc = &rx_ring->sbq[i];
2334 if (sbq_desc == NULL) {
2335 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2336 return;
2337 }
2338 if (sbq_desc->p.skb) {
2339 pci_unmap_single(qdev->pdev,
2340 pci_unmap_addr(sbq_desc, mapaddr),
2341 pci_unmap_len(sbq_desc, maplen),
2342 PCI_DMA_FROMDEVICE);
2343 dev_kfree_skb(sbq_desc->p.skb);
2344 sbq_desc->p.skb = NULL;
2345 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002346 }
2347}
2348
Ron Mercer4545a3f2009-02-23 10:42:17 +00002349/* Free all large and small rx buffers associated
2350 * with the completion queues for this device.
2351 */
2352static void ql_free_rx_buffers(struct ql_adapter *qdev)
2353{
2354 int i;
2355 struct rx_ring *rx_ring;
2356
2357 for (i = 0; i < qdev->rx_ring_count; i++) {
2358 rx_ring = &qdev->rx_ring[i];
2359 if (rx_ring->lbq)
2360 ql_free_lbq_buffers(qdev, rx_ring);
2361 if (rx_ring->sbq)
2362 ql_free_sbq_buffers(qdev, rx_ring);
2363 }
2364}
2365
2366static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2367{
2368 struct rx_ring *rx_ring;
2369 int i;
2370
2371 for (i = 0; i < qdev->rx_ring_count; i++) {
2372 rx_ring = &qdev->rx_ring[i];
2373 if (rx_ring->type != TX_Q)
2374 ql_update_buffer_queues(qdev, rx_ring);
2375 }
2376}
2377
2378static void ql_init_lbq_ring(struct ql_adapter *qdev,
2379 struct rx_ring *rx_ring)
2380{
2381 int i;
2382 struct bq_desc *lbq_desc;
2383 __le64 *bq = rx_ring->lbq_base;
2384
2385 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2386 for (i = 0; i < rx_ring->lbq_len; i++) {
2387 lbq_desc = &rx_ring->lbq[i];
2388 memset(lbq_desc, 0, sizeof(*lbq_desc));
2389 lbq_desc->index = i;
2390 lbq_desc->addr = bq;
2391 bq++;
2392 }
2393}
2394
2395static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002396 struct rx_ring *rx_ring)
2397{
2398 int i;
2399 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002400 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002401
Ron Mercer4545a3f2009-02-23 10:42:17 +00002402 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002403 for (i = 0; i < rx_ring->sbq_len; i++) {
2404 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002405 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002406 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002407 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002408 bq++;
2409 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002410}
2411
2412static void ql_free_rx_resources(struct ql_adapter *qdev,
2413 struct rx_ring *rx_ring)
2414{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002415 /* Free the small buffer queue. */
2416 if (rx_ring->sbq_base) {
2417 pci_free_consistent(qdev->pdev,
2418 rx_ring->sbq_size,
2419 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2420 rx_ring->sbq_base = NULL;
2421 }
2422
2423 /* Free the small buffer queue control blocks. */
2424 kfree(rx_ring->sbq);
2425 rx_ring->sbq = NULL;
2426
2427 /* Free the large buffer queue. */
2428 if (rx_ring->lbq_base) {
2429 pci_free_consistent(qdev->pdev,
2430 rx_ring->lbq_size,
2431 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2432 rx_ring->lbq_base = NULL;
2433 }
2434
2435 /* Free the large buffer queue control blocks. */
2436 kfree(rx_ring->lbq);
2437 rx_ring->lbq = NULL;
2438
2439 /* Free the rx queue. */
2440 if (rx_ring->cq_base) {
2441 pci_free_consistent(qdev->pdev,
2442 rx_ring->cq_size,
2443 rx_ring->cq_base, rx_ring->cq_base_dma);
2444 rx_ring->cq_base = NULL;
2445 }
2446}
2447
2448/* Allocate queues and buffers for this completion queue based
2449 * on the values in the parameter structure. */
2450static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2451 struct rx_ring *rx_ring)
2452{
2453
2454 /*
2455 * Allocate the completion queue for this rx_ring.
2456 */
2457 rx_ring->cq_base =
2458 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2459 &rx_ring->cq_base_dma);
2460
2461 if (rx_ring->cq_base == NULL) {
2462 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2463 return -ENOMEM;
2464 }
2465
2466 if (rx_ring->sbq_len) {
2467 /*
2468 * Allocate small buffer queue.
2469 */
2470 rx_ring->sbq_base =
2471 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2472 &rx_ring->sbq_base_dma);
2473
2474 if (rx_ring->sbq_base == NULL) {
2475 QPRINTK(qdev, IFUP, ERR,
2476 "Small buffer queue allocation failed.\n");
2477 goto err_mem;
2478 }
2479
2480 /*
2481 * Allocate small buffer queue control blocks.
2482 */
2483 rx_ring->sbq =
2484 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2485 GFP_KERNEL);
2486 if (rx_ring->sbq == NULL) {
2487 QPRINTK(qdev, IFUP, ERR,
2488 "Small buffer queue control block allocation failed.\n");
2489 goto err_mem;
2490 }
2491
Ron Mercer4545a3f2009-02-23 10:42:17 +00002492 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002493 }
2494
2495 if (rx_ring->lbq_len) {
2496 /*
2497 * Allocate large buffer queue.
2498 */
2499 rx_ring->lbq_base =
2500 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2501 &rx_ring->lbq_base_dma);
2502
2503 if (rx_ring->lbq_base == NULL) {
2504 QPRINTK(qdev, IFUP, ERR,
2505 "Large buffer queue allocation failed.\n");
2506 goto err_mem;
2507 }
2508 /*
2509 * Allocate large buffer queue control blocks.
2510 */
2511 rx_ring->lbq =
2512 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2513 GFP_KERNEL);
2514 if (rx_ring->lbq == NULL) {
2515 QPRINTK(qdev, IFUP, ERR,
2516 "Large buffer queue control block allocation failed.\n");
2517 goto err_mem;
2518 }
2519
Ron Mercer4545a3f2009-02-23 10:42:17 +00002520 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002521 }
2522
2523 return 0;
2524
2525err_mem:
2526 ql_free_rx_resources(qdev, rx_ring);
2527 return -ENOMEM;
2528}
2529
2530static void ql_tx_ring_clean(struct ql_adapter *qdev)
2531{
2532 struct tx_ring *tx_ring;
2533 struct tx_ring_desc *tx_ring_desc;
2534 int i, j;
2535
2536 /*
2537 * Loop through all queues and free
2538 * any resources.
2539 */
2540 for (j = 0; j < qdev->tx_ring_count; j++) {
2541 tx_ring = &qdev->tx_ring[j];
2542 for (i = 0; i < tx_ring->wq_len; i++) {
2543 tx_ring_desc = &tx_ring->q[i];
2544 if (tx_ring_desc && tx_ring_desc->skb) {
2545 QPRINTK(qdev, IFDOWN, ERR,
2546 "Freeing lost SKB %p, from queue %d, index %d.\n",
2547 tx_ring_desc->skb, j,
2548 tx_ring_desc->index);
2549 ql_unmap_send(qdev, tx_ring_desc,
2550 tx_ring_desc->map_cnt);
2551 dev_kfree_skb(tx_ring_desc->skb);
2552 tx_ring_desc->skb = NULL;
2553 }
2554 }
2555 }
2556}
2557
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002558static void ql_free_mem_resources(struct ql_adapter *qdev)
2559{
2560 int i;
2561
2562 for (i = 0; i < qdev->tx_ring_count; i++)
2563 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2564 for (i = 0; i < qdev->rx_ring_count; i++)
2565 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2566 ql_free_shadow_space(qdev);
2567}
2568
2569static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2570{
2571 int i;
2572
2573 /* Allocate space for our shadow registers and such. */
2574 if (ql_alloc_shadow_space(qdev))
2575 return -ENOMEM;
2576
2577 for (i = 0; i < qdev->rx_ring_count; i++) {
2578 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2579 QPRINTK(qdev, IFUP, ERR,
2580 "RX resource allocation failed.\n");
2581 goto err_mem;
2582 }
2583 }
2584 /* Allocate tx queue resources */
2585 for (i = 0; i < qdev->tx_ring_count; i++) {
2586 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2587 QPRINTK(qdev, IFUP, ERR,
2588 "TX resource allocation failed.\n");
2589 goto err_mem;
2590 }
2591 }
2592 return 0;
2593
2594err_mem:
2595 ql_free_mem_resources(qdev);
2596 return -ENOMEM;
2597}
2598
2599/* Set up the rx ring control block and pass it to the chip.
2600 * The control block is defined as
2601 * "Completion Queue Initialization Control Block", or cqicb.
2602 */
2603static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2604{
2605 struct cqicb *cqicb = &rx_ring->cqicb;
2606 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002607 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002608 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002609 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002610 void __iomem *doorbell_area =
2611 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2612 int err = 0;
2613 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002614 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002615 __le64 *base_indirect_ptr;
2616 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002617
2618 /* Set up the shadow registers for this ring. */
2619 rx_ring->prod_idx_sh_reg = shadow_reg;
2620 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2621 shadow_reg += sizeof(u64);
2622 shadow_reg_dma += sizeof(u64);
2623 rx_ring->lbq_base_indirect = shadow_reg;
2624 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002625 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2626 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002627 rx_ring->sbq_base_indirect = shadow_reg;
2628 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2629
2630 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002631 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002632 rx_ring->cnsmr_idx = 0;
2633 rx_ring->curr_entry = rx_ring->cq_base;
2634
2635 /* PCI doorbell mem area + 0x04 for valid register */
2636 rx_ring->valid_db_reg = doorbell_area + 0x04;
2637
2638 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002639 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002640
2641 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002642 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002643
2644 memset((void *)cqicb, 0, sizeof(struct cqicb));
2645 cqicb->msix_vect = rx_ring->irq;
2646
Ron Mercer459caf52009-01-04 17:08:11 -08002647 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2648 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002649
Ron Mercer97345522009-01-09 11:31:50 +00002650 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002651
Ron Mercer97345522009-01-09 11:31:50 +00002652 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002653
2654 /*
2655 * Set up the control block load flags.
2656 */
2657 cqicb->flags = FLAGS_LC | /* Load queue base address */
2658 FLAGS_LV | /* Load MSI-X vector */
2659 FLAGS_LI; /* Load irq delay values */
2660 if (rx_ring->lbq_len) {
2661 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002662 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002663 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2664 page_entries = 0;
2665 do {
2666 *base_indirect_ptr = cpu_to_le64(tmp);
2667 tmp += DB_PAGE_SIZE;
2668 base_indirect_ptr++;
2669 page_entries++;
2670 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002671 cqicb->lbq_addr =
2672 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002673 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2674 (u16) rx_ring->lbq_buf_size;
2675 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2676 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2677 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002678 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002679 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002680 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002681 rx_ring->lbq_clean_idx = 0;
2682 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002683 }
2684 if (rx_ring->sbq_len) {
2685 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002686 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002687 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2688 page_entries = 0;
2689 do {
2690 *base_indirect_ptr = cpu_to_le64(tmp);
2691 tmp += DB_PAGE_SIZE;
2692 base_indirect_ptr++;
2693 page_entries++;
2694 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002695 cqicb->sbq_addr =
2696 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002697 cqicb->sbq_buf_size =
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002698 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
Ron Mercer459caf52009-01-04 17:08:11 -08002699 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2700 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002701 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002702 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002703 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002704 rx_ring->sbq_clean_idx = 0;
2705 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002706 }
2707 switch (rx_ring->type) {
2708 case TX_Q:
2709 /* If there's only one interrupt, then we use
2710 * worker threads to process the outbound
2711 * completion handling rx_rings. We do this so
2712 * they can be run on multiple CPUs. There is
2713 * room to play with this more where we would only
2714 * run in a worker if there are more than x number
2715 * of outbound completions on the queue and more
2716 * than one queue active. Some threshold that
2717 * would indicate a benefit in spite of the cost
2718 * of a context switch.
2719 * If there's more than one interrupt, then the
2720 * outbound completions are processed in the ISR.
2721 */
2722 /* Freeing the skb in interrupt context trips a
2723 * WARN_ON when all debug warnings are enabled, so
2724 * outbound completions always run in a worker,
2725 * whether or not MSI-X is enabled.
2726 */
2727 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2730 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2731 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2732 break;
2733 case DEFAULT_Q:
2734 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2735 cqicb->irq_delay = 0;
2736 cqicb->pkt_delay = 0;
2737 break;
2738 case RX_Q:
2739 /* Inbound completion handling rx_rings run in
2740 * separate NAPI contexts.
2741 */
2742 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2743 64);
2744 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2745 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2746 break;
2747 default:
2748 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2749 rx_ring->type);
2750 }
Ron Mercer49740972009-02-26 10:08:36 +00002751 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002752 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2753 CFG_LCQ, rx_ring->cq_id);
2754 if (err) {
2755 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2756 return err;
2757 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002758 return err;
2759}
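
/*
 * Editor's note on the indirection loops above: the chip reads the
 * large/small buffer queues through per-page indirection entries.
 * Assuming one 8-byte (__le64) entry per queue element, the page
 * count per queue reduces to the hypothetical helper below.
 */
static inline int qlge_bq_db_pages(int q_len)
{
	/* one __le64 entry per element, DB_PAGE_SIZE bytes per page */
	return DIV_ROUND_UP(q_len * sizeof(__le64), DB_PAGE_SIZE);
}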
2760
2761static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2762{
2763 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2764 void __iomem *doorbell_area =
2765 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2766 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2767 (tx_ring->wq_id * sizeof(u64));
2768 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2769 (tx_ring->wq_id * sizeof(u64));
2770 int err = 0;
2771
2772 /*
2773 * Assign doorbell registers for this tx_ring.
2774 */
2775 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002776 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002777 tx_ring->prod_idx = 0;
2778 /* TX PCI doorbell mem area + 0x04 */
2779 tx_ring->valid_db_reg = doorbell_area + 0x04;
2780
2781 /*
2782 * Assign shadow registers for this tx_ring.
2783 */
2784 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2785 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2786
2787 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2788 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2789 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2790 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2791 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002792 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002793
Ron Mercer97345522009-01-09 11:31:50 +00002794 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002795
2796 ql_init_tx_ring(qdev, tx_ring);
2797
2798 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2799 (u16) tx_ring->wq_id);
2800 if (err) {
2801 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2802 return err;
2803 }
Ron Mercer49740972009-02-26 10:08:36 +00002804 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002805 return err;
2806}
2807
2808static void ql_disable_msix(struct ql_adapter *qdev)
2809{
2810 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2811 pci_disable_msix(qdev->pdev);
2812 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2813 kfree(qdev->msi_x_entry);
2814 qdev->msi_x_entry = NULL;
2815 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2816 pci_disable_msi(qdev->pdev);
2817 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2818 }
2819}
2820
2821static void ql_enable_msix(struct ql_adapter *qdev)
2822{
2823 int i;
2824
2825 qdev->intr_count = 1;
2826 /* Get the MSIX vectors. */
2827 if (irq_type == MSIX_IRQ) {
2828 /* Try to alloc space for the msix struct,
2829 * if it fails then go to MSI/legacy.
2830 */
2831 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2832 sizeof(struct msix_entry),
2833 GFP_KERNEL);
2834 if (!qdev->msi_x_entry) {
2835 irq_type = MSI_IRQ;
2836 goto msi;
2837 }
2838
2839 for (i = 0; i < qdev->rx_ring_count; i++)
2840 qdev->msi_x_entry[i].entry = i;
2841
2842 if (!pci_enable_msix
2843 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2844 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2845 qdev->intr_count = qdev->rx_ring_count;
Ron Mercer49740972009-02-26 10:08:36 +00002846 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002847 "MSI-X Enabled, got %d vectors.\n",
2848 qdev->intr_count);
2849 return;
2850 } else {
2851 kfree(qdev->msi_x_entry);
2852 qdev->msi_x_entry = NULL;
2853 QPRINTK(qdev, IFUP, WARNING,
2854 "MSI-X Enable failed, trying MSI.\n");
2855 irq_type = MSI_IRQ;
2856 }
2857 }
2858msi:
2859 if (irq_type == MSI_IRQ) {
2860 if (!pci_enable_msi(qdev->pdev)) {
2861 set_bit(QL_MSI_ENABLED, &qdev->flags);
2862 QPRINTK(qdev, IFUP, INFO,
2863 "Running with MSI interrupts.\n");
2864 return;
2865 }
2866 }
2867 irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002868 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2869}
2870
2871/*
2872 * Here we build the intr_context structures based on
2873 * our rx_ring count and intr vector count.
2874 * The intr_context structure is used to hook each vector
2875 * to possibly different handlers.
2876 */
2877static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2878{
2879 int i = 0;
2880 struct intr_context *intr_context = &qdev->intr_context[0];
2881
2882 ql_enable_msix(qdev);
2883
2884 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2885 /* Each rx_ring has its
2886 * own intr_context since we have separate
2887 * vectors for each queue.
2888 * This is only true when MSI-X is enabled.
2889 */
2890 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2891 qdev->rx_ring[i].irq = i;
2892 intr_context->intr = i;
2893 intr_context->qdev = qdev;
2894 /*
2895 * We set up each vectors enable/disable/read bits so
2896 * there's no bit/mask calculations in the critical path.
2897 */
2898 intr_context->intr_en_mask =
2899 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2900 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2901 | i;
2902 intr_context->intr_dis_mask =
2903 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2904 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2905 INTR_EN_IHD | i;
2906 intr_context->intr_read_mask =
2907 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2908 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2909 i;
2910
2911 if (i == 0) {
2912 /*
2913 * Default queue handles bcast/mcast plus
2914 * async events. Needs buffers.
2915 */
2916 intr_context->handler = qlge_isr;
2917 sprintf(intr_context->name, "%s-default-queue",
2918 qdev->ndev->name);
2919 } else if (i < qdev->rss_ring_first_cq_id) {
2920 /*
2921 * Outbound queue is for outbound completions only.
2922 */
2923 intr_context->handler = qlge_msix_tx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00002924 sprintf(intr_context->name, "%s-tx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002925 qdev->ndev->name, i);
2926 } else {
2927 /*
2928 * Inbound queues handle unicast frames only.
2929 */
2930 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00002931 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002932 qdev->ndev->name, i);
2933 }
2934 }
2935 } else {
2936 /*
2937 * All rx_rings use the same intr_context since
2938 * there is only one vector.
2939 */
2940 intr_context->intr = 0;
2941 intr_context->qdev = qdev;
2942 /*
2943 * We set up each vectors enable/disable/read bits so
2944 * there's no bit/mask calculations in the critical path.
2945 */
2946 intr_context->intr_en_mask =
2947 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2948 intr_context->intr_dis_mask =
2949 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2950 INTR_EN_TYPE_DISABLE;
2951 intr_context->intr_read_mask =
2952 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2953 /*
2954 * Single interrupt means one handler for all rings.
2955 */
2956 intr_context->handler = qlge_isr;
2957 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2958 for (i = 0; i < qdev->rx_ring_count; i++)
2959 qdev->rx_ring[i].irq = 0;
2960 }
2961}
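
/*
 * Editor's sketch: with the enable/disable/read values precomputed
 * above, the hot path degenerates to a single register write.  The
 * helper below is illustrative only; the driver's real enable path
 * also maintains irq_cnt for shared-interrupt accounting.
 */
static inline void qlge_intr_enable_fast(struct ql_adapter *qdev,
					 struct intr_context *ctx)
{
	ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}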
2962
2963static void ql_free_irq(struct ql_adapter *qdev)
2964{
2965 int i;
2966 struct intr_context *intr_context = &qdev->intr_context[0];
2967
2968 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2969 if (intr_context->hooked) {
2970 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2971 free_irq(qdev->msi_x_entry[i].vector,
2972 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00002973 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002974 "freeing msix interrupt %d.\n", i);
2975 } else {
2976 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00002977 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002978 "freeing msi interrupt %d.\n", i);
2979 }
2980 }
2981 }
2982 ql_disable_msix(qdev);
2983}
2984
2985static int ql_request_irq(struct ql_adapter *qdev)
2986{
2987 int i;
2988 int status = 0;
2989 struct pci_dev *pdev = qdev->pdev;
2990 struct intr_context *intr_context = &qdev->intr_context[0];
2991
2992 ql_resolve_queues_to_irqs(qdev);
2993
2994 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2995 atomic_set(&intr_context->irq_cnt, 0);
2996 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2997 status = request_irq(qdev->msi_x_entry[i].vector,
2998 intr_context->handler,
2999 0,
3000 intr_context->name,
3001 &qdev->rx_ring[i]);
3002 if (status) {
3003 QPRINTK(qdev, IFUP, ERR,
3004 "Failed request for MSIX interrupt %d.\n",
3005 i);
3006 goto err_irq;
3007 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003008 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003009 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3010 i,
3011 qdev->rx_ring[i].type ==
3012 DEFAULT_Q ? "DEFAULT_Q" : "",
3013 qdev->rx_ring[i].type ==
3014 TX_Q ? "TX_Q" : "",
3015 qdev->rx_ring[i].type ==
3016 RX_Q ? "RX_Q" : "", intr_context->name);
3017 }
3018 } else {
3019 QPRINTK(qdev, IFUP, DEBUG,
3020 "trying msi or legacy interrupts.\n");
3021 QPRINTK(qdev, IFUP, DEBUG,
3022 "%s: irq = %d.\n", __func__, pdev->irq);
3023 QPRINTK(qdev, IFUP, DEBUG,
3024 "%s: context->name = %s.\n", __func__,
3025 intr_context->name);
3026 QPRINTK(qdev, IFUP, DEBUG,
3027 "%s: dev_id = 0x%p.\n", __func__,
3028 &qdev->rx_ring[0]);
3029 status =
3030 request_irq(pdev->irq, qlge_isr,
3031 test_bit(QL_MSI_ENABLED,
3032 &qdev->
3033 flags) ? 0 : IRQF_SHARED,
3034 intr_context->name, &qdev->rx_ring[0]);
3035 if (status)
3036 goto err_irq;
3037
3038 QPRINTK(qdev, IFUP, INFO,
3039 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3040 i,
3041 qdev->rx_ring[0].type ==
3042 DEFAULT_Q ? "DEFAULT_Q" : "",
3043 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3044 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3045 intr_context->name);
3046 }
3047 intr_context->hooked = 1;
3048 }
3049 return status;
3050err_irq:
3051 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3052 ql_free_irq(qdev);
3053 return status;
3054}
3055
3056static int ql_start_rss(struct ql_adapter *qdev)
3057{
3058 struct ricb *ricb = &qdev->ricb;
3059 int status = 0;
3060 int i;
3061 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3062
3063 memset((void *)ricb, 0, sizeof(*ricb));
3064
3065 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
3066 ricb->flags =
3067 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
3068 RSS_RT6);
3069 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
3070
3071 /*
3072 * Fill out the Indirection Table.
3073 */
Ron Mercerdef48b62009-02-12 16:38:18 -08003074 for (i = 0; i < 256; i++)
3075 hash_id[i] = i & (qdev->rss_ring_count - 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076
3077 /*
3078 * Random values for the IPv6 and IPv4 Hash Keys.
3079 */
3080 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
3081 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
3082
Ron Mercer49740972009-02-26 10:08:36 +00003083 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003084
3085 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3086 if (status) {
3087 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3088 return status;
3089 }
Ron Mercer49740972009-02-26 10:08:36 +00003090 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003091 return status;
3092}
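
/*
 * Editor's note: the indirection fill above relies on the RSS ring
 * count being a power of two, so "i & (count - 1)" equals
 * "i % count" and the 256 table slots spread evenly across rings.
 * The helper below is hypothetical.
 */
static inline u8 qlge_rss_slot_to_ring(int slot, int ring_count)
{
	/* valid only for a power-of-two ring_count */
	return slot & (ring_count - 1);
}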
3093
Ron Mercera5f59dc2009-07-02 06:06:07 +00003094static int ql_clear_routing_entries(struct ql_adapter *qdev)
3095{
3096 int i, status = 0;
3097
3098 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3099 if (status)
3100 return status;
3101 /* Clear all the entries in the routing table. */
3102 for (i = 0; i < 16; i++) {
3103 status = ql_set_routing_reg(qdev, i, 0, 0);
3104 if (status) {
3105 QPRINTK(qdev, IFUP, ERR,
3106 "Failed to init routing register for CAM "
3107 "packets.\n");
3108 break;
3109 }
3110 }
3111 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3112 return status;
3113}
3114
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003115/* Initialize the frame-to-queue routing. */
3116static int ql_route_initialize(struct ql_adapter *qdev)
3117{
3118 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003119
Ron Mercer8587ea32009-02-23 10:42:15 +00003120 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3121 if (status)
3122 return status;
3123
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003124 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003125 status = ql_clear_routing_entries(qdev);
3126 if (status)
3127 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003128
3129 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3130 if (status) {
3131 QPRINTK(qdev, IFUP, ERR,
3132 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003133 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003134 }
3135 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3136 if (status) {
3137 QPRINTK(qdev, IFUP, ERR,
3138 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003139 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003140 }
3141 /* If we have more than one inbound queue, then turn on RSS in the
3142 * routing block.
3143 */
3144 if (qdev->rss_ring_count > 1) {
3145 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3146 RT_IDX_RSS_MATCH, 1);
3147 if (status) {
3148 QPRINTK(qdev, IFUP, ERR,
3149 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003150 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003151 }
3152 }
3153
3154 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3155 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003156 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003157 QPRINTK(qdev, IFUP, ERR,
3158 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003159exit:
3160 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003161 return status;
3162}
3163
Ron Mercer2ee1e272009-03-03 12:10:33 +00003164int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003165{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003166 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003167
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003168 /* Check if the link is up and use that to
3169 * determine whether we are setting or clearing
3170 * the MAC address in the CAM.
3171 */
3172 set = ql_read32(qdev, STS);
3173 set &= qdev->port_link_up;
3174 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003175 if (status) {
3176 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3177 return status;
3178 }
3179
3180 status = ql_route_initialize(qdev);
3181 if (status)
3182 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3183
3184 return status;
3185}
3186
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003187static int ql_adapter_initialize(struct ql_adapter *qdev)
3188{
3189 u32 value, mask;
3190 int i;
3191 int status = 0;
3192
3193 /*
3194 * Set up the System register to halt on errors.
3195 */
3196 value = SYS_EFE | SYS_FAE;
3197 mask = value << 16;
3198 ql_write32(qdev, SYS, mask | value);
3199
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003200 /* Set the default queue, and VLAN behavior. */
3201 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3202 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003203 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3204
3205 /* Set the MPI interrupt to enabled. */
3206 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3207
3208 /* Enable the function, set pagesize, enable error checking. */
3209 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3210 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3211
3212 /* Set/clear header splitting. */
3213 mask = FSC_VM_PAGESIZE_MASK |
3214 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3215 ql_write32(qdev, FSC, mask | value);
3216
3217 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3218 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3219
3220 /* Start up the rx queues. */
3221 for (i = 0; i < qdev->rx_ring_count; i++) {
3222 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3223 if (status) {
3224 QPRINTK(qdev, IFUP, ERR,
3225 "Failed to start rx ring[%d].\n", i);
3226 return status;
3227 }
3228 }
3229
3230 /* If there is more than one inbound completion queue
3231 * then download a RICB to configure RSS.
3232 */
3233 if (qdev->rss_ring_count > 1) {
3234 status = ql_start_rss(qdev);
3235 if (status) {
3236 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3237 return status;
3238 }
3239 }
3240
3241 /* Start up the tx queues. */
3242 for (i = 0; i < qdev->tx_ring_count; i++) {
3243 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3244 if (status) {
3245 QPRINTK(qdev, IFUP, ERR,
3246 "Failed to start tx ring[%d].\n", i);
3247 return status;
3248 }
3249 }
3250
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003251 /* Initialize the port and set the max framesize. */
3252 status = qdev->nic_ops->port_initialize(qdev);
3253 if (status) {
3254 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3255 return status;
3256 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003257
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003258 /* Set up the MAC address and frame routing filter. */
3259 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003260 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003261 QPRINTK(qdev, IFUP, ERR,
3262 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003263 return status;
3264 }
3265
3266 /* Start NAPI for the RSS queues. */
3267 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003268 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003269 i);
3270 napi_enable(&qdev->rx_ring[i].napi);
3271 }
3272
3273 return status;
3274}
3275
3276/* Issue soft reset to chip. */
3277static int ql_adapter_reset(struct ql_adapter *qdev)
3278{
3279 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003280 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003281 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003282
Ron Mercera5f59dc2009-07-02 06:06:07 +00003283 /* Clear all the entries in the routing table. */
3284 status = ql_clear_routing_entries(qdev);
3285 if (status) {
3286 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3287 return status;
3288 }
3289
3290 end_jiffies = jiffies +
3291 max((unsigned long)1, usecs_to_jiffies(30));
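	/* The max() guarantees at least one jiffy of polling below,
	 * even if usecs_to_jiffies(30) were ever to evaluate to zero.
	 */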
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003292 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003293
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003294 do {
3295 value = ql_read32(qdev, RST_FO);
3296 if ((value & RST_FO_FR) == 0)
3297 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003298 cpu_relax();
3299 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003300
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003301 if (value & RST_FO_FR) {
3302 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003303			"ETIMEDOUT!!! Timed out waiting for chip reset to complete!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003304 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003305 }
3306
3307 return status;
3308}
3309
3310static void ql_display_dev_info(struct net_device *ndev)
3311{
3312 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3313
3314 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003315 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003316 "XG Roll = %d, XG Rev = %d.\n",
3317 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003318 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003319 qdev->chip_rev_id & 0x0000000f,
3320 qdev->chip_rev_id >> 4 & 0x0000000f,
3321 qdev->chip_rev_id >> 8 & 0x0000000f,
3322 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003323 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003324}
3325
3326static int ql_adapter_down(struct ql_adapter *qdev)
3327{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003328 int i, status = 0;
3329 struct rx_ring *rx_ring;
3330
Ron Mercer6a473302009-07-02 06:06:12 +00003331 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003332
Ron Mercer6497b602009-02-12 16:37:13 -08003333 /* Don't kill the reset worker thread if we
3334 * are in the process of recovery.
3335 */
3336 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3337 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003338 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3339 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003340 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003341 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003342
3343 /* The default queue at index 0 is always processed in
3344 * a workqueue.
3345 */
3346 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3347
3348 /* The rest of the rx_rings are processed in
3349 * a workqueue only if it's a single interrupt
3350 * environment (MSI/Legacy).
3351 */
Roel Kluinc0620762008-12-25 17:23:50 -08003352 for (i = 1; i < qdev->rx_ring_count; i++) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003353 rx_ring = &qdev->rx_ring[i];
3354 /* Only the RSS rings use NAPI on multi irq
3355 * environment. Outbound completion processing
3356 * is done in interrupt context.
3357 */
3358 if (i >= qdev->rss_ring_first_cq_id) {
3359 napi_disable(&rx_ring->napi);
3360 } else {
3361 cancel_delayed_work_sync(&rx_ring->rx_work);
3362 }
3363 }
3364
3365 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3366
3367 ql_disable_interrupts(qdev);
3368
3369 ql_tx_ring_clean(qdev);
3370
Ron Mercer6b318cb2009-03-09 10:59:26 +00003371	/* Call netif_napi_del() from a common point.
3372 */
3373 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3374 netif_napi_del(&qdev->rx_ring[i].napi);
3375
Ron Mercer4545a3f2009-02-23 10:42:17 +00003376 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003377
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003378 spin_lock(&qdev->hw_lock);
3379 status = ql_adapter_reset(qdev);
3380 if (status)
3381 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3382 qdev->func);
3383 spin_unlock(&qdev->hw_lock);
3384 return status;
3385}
3386
3387static int ql_adapter_up(struct ql_adapter *qdev)
3388{
3389 int err = 0;
3390
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003391 err = ql_adapter_initialize(qdev);
3392 if (err) {
 3393		QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003394 goto err_init;
3395 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003396 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003397 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003398 /* If the port is initialized and the
 3399	 * link is up then turn on the carrier.
3400 */
3401 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3402 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003403 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003404 ql_enable_interrupts(qdev);
3405 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003406 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003407
3408 return 0;
3409err_init:
3410 ql_adapter_reset(qdev);
3411 return err;
3412}
3413
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003414static void ql_release_adapter_resources(struct ql_adapter *qdev)
3415{
3416 ql_free_mem_resources(qdev);
3417 ql_free_irq(qdev);
3418}
3419
3420static int ql_get_adapter_resources(struct ql_adapter *qdev)
3421{
3422 int status = 0;
3423
3424 if (ql_alloc_mem_resources(qdev)) {
3425 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3426 return -ENOMEM;
3427 }
3428 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003429 return status;
3430}
3431
3432static int qlge_close(struct net_device *ndev)
3433{
3434 struct ql_adapter *qdev = netdev_priv(ndev);
3435
3436 /*
3437 * Wait for device to recover from a reset.
3438 * (Rarely happens, but possible.)
3439 */
3440 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3441 msleep(1);
3442 ql_adapter_down(qdev);
3443 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003444 return 0;
3445}
3446
3447static int ql_configure_rings(struct ql_adapter *qdev)
3448{
3449 int i;
3450 struct rx_ring *rx_ring;
3451 struct tx_ring *tx_ring;
3452 int cpu_cnt = num_online_cpus();
3453
3454 /*
3455 * For each processor present we allocate one
3456 * rx_ring for outbound completions, and one
3457 * rx_ring for inbound completions. Plus there is
 3458	 * always the one default queue. For a given CPU
 3459	 * count we end up with the following rx_rings:
3460 * rx_ring count =
3461 * one default queue +
3462 * (CPU count * outbound completion rx_ring) +
3463 * (CPU count * inbound (RSS) completion rx_ring)
3464 * To keep it simple we limit the total number of
 3465	 * queues to < 32, so we cap the CPU count at 8.
3466 * This limitation can be removed when requested.
3467 */
3468
Ron Mercer683d46a2009-01-09 11:31:53 +00003469 if (cpu_cnt > MAX_CPUS)
3470 cpu_cnt = MAX_CPUS;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003471
3472 /*
3473 * rx_ring[0] is always the default queue.
3474 */
3475 /* Allocate outbound completion ring for each CPU. */
3476 qdev->tx_ring_count = cpu_cnt;
3477 /* Allocate inbound completion (RSS) ring for each CPU. */
3478 qdev->rss_ring_count = cpu_cnt;
3479 /* cq_id for the first inbound ring handler. */
3480 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3481 /*
3482 * qdev->rx_ring_count:
3483 * Total number of rx_rings. This includes the one
3484 * default queue, a number of outbound completion
3485 * handler rx_rings, and the number of inbound
3486 * completion handler rx_rings.
3487 */
3488 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
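	/* Worked example, assuming 4 online CPUs: tx_ring_count = 4,
	 * rss_ring_count = 4, rss_ring_first_cq_id = 5 and
	 * rx_ring_count = 9.  rx_ring[0] is then the default queue,
	 * rx_ring[1..4] handle the tx completions and rx_ring[5..8]
	 * are the inbound (RSS) completion rings.
	 */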
3489
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003490 for (i = 0; i < qdev->tx_ring_count; i++) {
3491 tx_ring = &qdev->tx_ring[i];
 3492		memset((void *)tx_ring, 0, sizeof(*tx_ring));
3493 tx_ring->qdev = qdev;
3494 tx_ring->wq_id = i;
3495 tx_ring->wq_len = qdev->tx_ring_size;
3496 tx_ring->wq_size =
3497 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3498
3499 /*
 3500		 * The completion queue IDs for the tx rings start
3501 * immediately after the default Q ID, which is zero.
3502 */
3503 tx_ring->cq_id = i + 1;
3504 }
3505
3506 for (i = 0; i < qdev->rx_ring_count; i++) {
3507 rx_ring = &qdev->rx_ring[i];
 3508		memset((void *)rx_ring, 0, sizeof(*rx_ring));
3509 rx_ring->qdev = qdev;
3510 rx_ring->cq_id = i;
3511 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3512 if (i == 0) { /* Default queue at index 0. */
3513 /*
3514 * Default queue handles bcast/mcast plus
3515 * async events. Needs buffers.
3516 */
3517 rx_ring->cq_len = qdev->rx_ring_size;
3518 rx_ring->cq_size =
3519 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3520 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3521 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003522 rx_ring->lbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003523 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3524 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3525 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003526 rx_ring->sbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003527 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3528 rx_ring->type = DEFAULT_Q;
3529 } else if (i < qdev->rss_ring_first_cq_id) {
3530 /*
3531 * Outbound queue handles outbound completions only.
3532 */
3533 /* outbound cq is same size as tx_ring it services. */
3534 rx_ring->cq_len = qdev->tx_ring_size;
3535 rx_ring->cq_size =
3536 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3537 rx_ring->lbq_len = 0;
3538 rx_ring->lbq_size = 0;
3539 rx_ring->lbq_buf_size = 0;
3540 rx_ring->sbq_len = 0;
3541 rx_ring->sbq_size = 0;
3542 rx_ring->sbq_buf_size = 0;
3543 rx_ring->type = TX_Q;
3544 } else { /* Inbound completions (RSS) queues */
3545 /*
3546 * Inbound queues handle unicast frames only.
3547 */
3548 rx_ring->cq_len = qdev->rx_ring_size;
3549 rx_ring->cq_size =
3550 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3551 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3552 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003553 rx_ring->lbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003554 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3555 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3556 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003557 rx_ring->sbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003558 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3559 rx_ring->type = RX_Q;
3560 }
3561 }
3562 return 0;
3563}
3564
3565static int qlge_open(struct net_device *ndev)
3566{
3567 int err = 0;
3568 struct ql_adapter *qdev = netdev_priv(ndev);
3569
3570 err = ql_configure_rings(qdev);
3571 if (err)
3572 return err;
3573
3574 err = ql_get_adapter_resources(qdev);
3575 if (err)
3576 goto error_up;
3577
3578 err = ql_adapter_up(qdev);
3579 if (err)
3580 goto error_up;
3581
3582 return err;
3583
3584error_up:
3585 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586 return err;
3587}
3588
3589static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3590{
3591 struct ql_adapter *qdev = netdev_priv(ndev);
3592
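	/* Only 1500 <-> 9000 MTU transitions are supported.  A switch
	 * to jumbo frames kicks the MPI port-config worker, presumably
	 * so the firmware can reconfigure the port for the larger
	 * frame size.
	 */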
3593 if (ndev->mtu == 1500 && new_mtu == 9000) {
3594 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003595 queue_delayed_work(qdev->workqueue,
3596 &qdev->mpi_port_cfg_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003597 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3598 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3599 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3600 (ndev->mtu == 9000 && new_mtu == 9000)) {
3601 return 0;
3602 } else
3603 return -EINVAL;
3604 ndev->mtu = new_mtu;
3605 return 0;
3606}
3607
3608static struct net_device_stats *qlge_get_stats(struct net_device
3609 *ndev)
3610{
3611 struct ql_adapter *qdev = netdev_priv(ndev);
3612 return &qdev->stats;
3613}
3614
3615static void qlge_set_multicast_list(struct net_device *ndev)
3616{
3617 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3618 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003619 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003620
Ron Mercercc288f52009-02-23 10:42:14 +00003621 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3622 if (status)
3623 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003624 spin_lock(&qdev->hw_lock);
3625 /*
3626 * Set or clear promiscuous mode if a
3627 * transition is taking place.
3628 */
3629 if (ndev->flags & IFF_PROMISC) {
3630 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3631 if (ql_set_routing_reg
3632 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3633 QPRINTK(qdev, HW, ERR,
3634 "Failed to set promiscous mode.\n");
3635 } else {
3636 set_bit(QL_PROMISCUOUS, &qdev->flags);
3637 }
3638 }
3639 } else {
3640 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3641 if (ql_set_routing_reg
3642 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3643 QPRINTK(qdev, HW, ERR,
3644 "Failed to clear promiscous mode.\n");
3645 } else {
3646 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3647 }
3648 }
3649 }
3650
3651 /*
3652 * Set or clear all multicast mode if a
3653 * transition is taking place.
3654 */
3655 if ((ndev->flags & IFF_ALLMULTI) ||
3656 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3657 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3658 if (ql_set_routing_reg
3659 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3660 QPRINTK(qdev, HW, ERR,
3661 "Failed to set all-multi mode.\n");
3662 } else {
3663 set_bit(QL_ALLMULTI, &qdev->flags);
3664 }
3665 }
3666 } else {
3667 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3668 if (ql_set_routing_reg
3669 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3670 QPRINTK(qdev, HW, ERR,
3671 "Failed to clear all-multi mode.\n");
3672 } else {
3673 clear_bit(QL_ALLMULTI, &qdev->flags);
3674 }
3675 }
3676 }
3677
3678 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003679 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3680 if (status)
3681 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003682 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3683 i++, mc_ptr = mc_ptr->next)
3684 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3685 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3686 QPRINTK(qdev, HW, ERR,
3687 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003688 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003689 goto exit;
3690 }
Ron Mercercc288f52009-02-23 10:42:14 +00003691 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003692 if (ql_set_routing_reg
3693 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3694 QPRINTK(qdev, HW, ERR,
3695 "Failed to set multicast match mode.\n");
3696 } else {
3697 set_bit(QL_ALLMULTI, &qdev->flags);
3698 }
3699 }
3700exit:
3701 spin_unlock(&qdev->hw_lock);
Ron Mercer8587ea32009-02-23 10:42:15 +00003702 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003703}
3704
3705static int qlge_set_mac_address(struct net_device *ndev, void *p)
3706{
3707 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3708 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003709 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003710
3711 if (netif_running(ndev))
3712 return -EBUSY;
3713
3714 if (!is_valid_ether_addr(addr->sa_data))
3715 return -EADDRNOTAVAIL;
3716 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3717
Ron Mercercc288f52009-02-23 10:42:14 +00003718 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3719 if (status)
3720 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003721 spin_lock(&qdev->hw_lock);
Ron Mercercc288f52009-02-23 10:42:14 +00003722 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3723 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003724 spin_unlock(&qdev->hw_lock);
Ron Mercercc288f52009-02-23 10:42:14 +00003725 if (status)
3726 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3727 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3728 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003729}
3730
3731static void qlge_tx_timeout(struct net_device *ndev)
3732{
3733 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08003734 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003735}
3736
3737static void ql_asic_reset_work(struct work_struct *work)
3738{
3739 struct ql_adapter *qdev =
3740 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00003741 int status;
3742
3743 status = ql_adapter_down(qdev);
3744 if (status)
3745 goto error;
3746
3747 status = ql_adapter_up(qdev);
3748 if (status)
3749 goto error;
3750
3751 return;
3752error:
3753 QPRINTK(qdev, IFUP, ALERT,
3754 "Driver up/down cycle failed, closing device\n");
3755 rtnl_lock();
3756 set_bit(QL_ADAPTER_UP, &qdev->flags);
3757 dev_close(qdev->ndev);
3758 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003759}
3760
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003761static struct nic_operations qla8012_nic_ops = {
3762 .get_flash = ql_get_8012_flash_params,
3763 .port_initialize = ql_8012_port_initialize,
3764};
3765
Ron Mercercdca8d02009-03-02 08:07:31 +00003766static struct nic_operations qla8000_nic_ops = {
3767 .get_flash = ql_get_8000_flash_params,
3768 .port_initialize = ql_8000_port_initialize,
3769};
3770
Ron Mercere4552f52009-06-09 05:39:32 +00003771/* Find the PCIe function number for the other NIC
3772 * on this chip. Since both NIC functions share a
3773 * common firmware we have the lowest enabled function
3774 * do any common work. Examples would be resetting
3775 * after a fatal firmware error, or doing a firmware
3776 * coredump.
3777 */
3778static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003779{
Ron Mercere4552f52009-06-09 05:39:32 +00003780 int status = 0;
3781 u32 temp;
3782 u32 nic_func1, nic_func2;
3783
3784 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3785 &temp);
3786 if (status)
3787 return status;
3788
3789 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3790 MPI_TEST_NIC_FUNC_MASK);
3791 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3792 MPI_TEST_NIC_FUNC_MASK);
3793
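	/* For example, if the MPI config word reports the two NIC
	 * functions as 0 and 1 and we are function 0, alt_func becomes
	 * 1.  Any other function number means the register contents do
	 * not match this device, so we fail with -EIO.
	 */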
3794 if (qdev->func == nic_func1)
3795 qdev->alt_func = nic_func2;
3796 else if (qdev->func == nic_func2)
3797 qdev->alt_func = nic_func1;
3798 else
3799 status = -EIO;
3800
3801 return status;
3802}
3803
3804static int ql_get_board_info(struct ql_adapter *qdev)
3805{
3806 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003807 qdev->func =
3808 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00003809 if (qdev->func > 3)
3810 return -EIO;
3811
3812 status = ql_get_alt_pcie_func(qdev);
3813 if (status)
3814 return status;
3815
3816 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3817 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003818 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3819 qdev->port_link_up = STS_PL1;
3820 qdev->port_init = STS_PI1;
3821 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3822 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3823 } else {
3824 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3825 qdev->port_link_up = STS_PL0;
3826 qdev->port_init = STS_PI0;
3827 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3828 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3829 }
3830 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003831 qdev->device_id = qdev->pdev->device;
3832 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3833 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00003834 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3835 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00003836 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003837}
3838
3839static void ql_release_all(struct pci_dev *pdev)
3840{
3841 struct net_device *ndev = pci_get_drvdata(pdev);
3842 struct ql_adapter *qdev = netdev_priv(ndev);
3843
3844 if (qdev->workqueue) {
3845 destroy_workqueue(qdev->workqueue);
3846 qdev->workqueue = NULL;
3847 }
3848 if (qdev->q_workqueue) {
3849 destroy_workqueue(qdev->q_workqueue);
3850 qdev->q_workqueue = NULL;
3851 }
3852 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003853 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003854 if (qdev->doorbell_area)
3855 iounmap(qdev->doorbell_area);
3856 pci_release_regions(pdev);
3857 pci_set_drvdata(pdev, NULL);
3858}
3859
3860static int __devinit ql_init_device(struct pci_dev *pdev,
3861 struct net_device *ndev, int cards_found)
3862{
3863 struct ql_adapter *qdev = netdev_priv(ndev);
3864 int pos, err = 0;
3865 u16 val16;
3866
 3867	memset((void *)qdev, 0, sizeof(*qdev));
3868 err = pci_enable_device(pdev);
3869 if (err) {
3870 dev_err(&pdev->dev, "PCI device enable failed.\n");
3871 return err;
3872 }
3873
3874 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3875 if (pos <= 0) {
3876 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3877 "aborting.\n");
 3878		err = -EIO;
		goto err_out;
3879 } else {
3880 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3881 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3882 val16 |= (PCI_EXP_DEVCTL_CERE |
3883 PCI_EXP_DEVCTL_NFERE |
3884 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3885 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3886 }
3887
3888 err = pci_request_regions(pdev, DRV_NAME);
3889 if (err) {
3890 dev_err(&pdev->dev, "PCI region request failed.\n");
3891 goto err_out;
3892 }
3893
3894 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07003895 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003896 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07003897 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003898 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07003899 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003900 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07003901 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003902 }
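	/* QL_DMA64 is consulted later in qlge_probe() to decide
	 * whether to advertise NETIF_F_HIGHDMA on the net device.
	 */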
3903
3904 if (err) {
3905 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3906 goto err_out;
3907 }
3908
3909 pci_set_drvdata(pdev, ndev);
3910 qdev->reg_base =
3911 ioremap_nocache(pci_resource_start(pdev, 1),
3912 pci_resource_len(pdev, 1));
3913 if (!qdev->reg_base) {
3914 dev_err(&pdev->dev, "Register mapping failed.\n");
3915 err = -ENOMEM;
3916 goto err_out;
3917 }
3918
3919 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3920 qdev->doorbell_area =
3921 ioremap_nocache(pci_resource_start(pdev, 3),
3922 pci_resource_len(pdev, 3));
3923 if (!qdev->doorbell_area) {
3924 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3925 err = -ENOMEM;
3926 goto err_out;
3927 }
3928
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003929 qdev->ndev = ndev;
3930 qdev->pdev = pdev;
Ron Mercere4552f52009-06-09 05:39:32 +00003931 err = ql_get_board_info(qdev);
3932 if (err) {
3933 dev_err(&pdev->dev, "Register access failed.\n");
3934 err = -EIO;
3935 goto err_out;
3936 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003937 qdev->msg_enable = netif_msg_init(debug, default_msg);
3938 spin_lock_init(&qdev->hw_lock);
3939 spin_lock_init(&qdev->stats_lock);
3940
3941 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003942 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003943 if (err) {
3944 dev_err(&pdev->dev, "Invalid FLASH.\n");
3945 goto err_out;
3946 }
3947
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003948 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3949
3950 /* Set up the default ring sizes. */
3951 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3952 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3953
3954 /* Set up the coalescing parameters. */
3955 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3956 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3957 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3958 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3959
3960 /*
3961 * Set up the operating parameters.
3962 */
3963 qdev->rx_csum = 1;
3964
3965 qdev->q_workqueue = create_workqueue(ndev->name);
3966 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3967 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3968 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3969 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003970 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003971 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer125844e2009-02-26 10:08:34 +00003972 mutex_init(&qdev->mpi_mutex);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003973 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003974
3975 if (!cards_found) {
3976 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3977 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3978 DRV_NAME, DRV_VERSION);
3979 }
3980 return 0;
3981err_out:
3982 ql_release_all(pdev);
3983 pci_disable_device(pdev);
3984 return err;
3985}
3986
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003987
3988static const struct net_device_ops qlge_netdev_ops = {
3989 .ndo_open = qlge_open,
3990 .ndo_stop = qlge_close,
3991 .ndo_start_xmit = qlge_send,
3992 .ndo_change_mtu = qlge_change_mtu,
3993 .ndo_get_stats = qlge_get_stats,
3994 .ndo_set_multicast_list = qlge_set_multicast_list,
3995 .ndo_set_mac_address = qlge_set_mac_address,
3996 .ndo_validate_addr = eth_validate_addr,
3997 .ndo_tx_timeout = qlge_tx_timeout,
3998 .ndo_vlan_rx_register = ql_vlan_rx_register,
3999 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
4000 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
4001};
4002
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004003static int __devinit qlge_probe(struct pci_dev *pdev,
4004 const struct pci_device_id *pci_entry)
4005{
4006 struct net_device *ndev = NULL;
4007 struct ql_adapter *qdev = NULL;
4008 static int cards_found = 0;
4009 int err = 0;
4010
Ron Mercer1e213302009-03-09 10:59:21 +00004011 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4012 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004013 if (!ndev)
4014 return -ENOMEM;
4015
4016 err = ql_init_device(pdev, ndev, cards_found);
4017 if (err < 0) {
4018 free_netdev(ndev);
4019 return err;
4020 }
4021
4022 qdev = netdev_priv(ndev);
4023 SET_NETDEV_DEV(ndev, &pdev->dev);
4024 ndev->features = (0
4025 | NETIF_F_IP_CSUM
4026 | NETIF_F_SG
4027 | NETIF_F_TSO
4028 | NETIF_F_TSO6
4029 | NETIF_F_TSO_ECN
4030 | NETIF_F_HW_VLAN_TX
4031 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004032 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004033
4034 if (test_bit(QL_DMA64, &qdev->flags))
4035 ndev->features |= NETIF_F_HIGHDMA;
4036
4037 /*
4038 * Set up net_device structure.
4039 */
4040 ndev->tx_queue_len = qdev->tx_ring_size;
4041 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004042
4043 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004044 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004045 ndev->watchdog_timeo = 10 * HZ;
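	/* If the stack sees no tx progress for 10 seconds it calls
	 * qlge_tx_timeout(), which invokes ql_queue_asic_error(),
	 * presumably scheduling the ql_asic_reset_work() recovery
	 * handler defined above.
	 */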
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004046
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004047 err = register_netdev(ndev);
4048 if (err) {
4049 dev_err(&pdev->dev, "net device registration failed.\n");
4050 ql_release_all(pdev);
4051 pci_disable_device(pdev);
4052 return err;
4053 }
Ron Mercer6a473302009-07-02 06:06:12 +00004054 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004055 ql_display_dev_info(ndev);
4056 cards_found++;
4057 return 0;
4058}
4059
4060static void __devexit qlge_remove(struct pci_dev *pdev)
4061{
4062 struct net_device *ndev = pci_get_drvdata(pdev);
4063 unregister_netdev(ndev);
4064 ql_release_all(pdev);
4065 pci_disable_device(pdev);
4066 free_netdev(ndev);
4067}
4068
4069/*
4070 * This callback is called by the PCI subsystem whenever
4071 * a PCI bus error is detected.
4072 */
4073static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4074 enum pci_channel_state state)
4075{
4076 struct net_device *ndev = pci_get_drvdata(pdev);
4077 struct ql_adapter *qdev = netdev_priv(ndev);
4078
4079 if (netif_running(ndev))
4080 ql_adapter_down(qdev);
4081
4082 pci_disable_device(pdev);
4083
4084 /* Request a slot reset. */
4085 return PCI_ERS_RESULT_NEED_RESET;
4086}
4087
4088/*
 4089 * This callback is called after the PCI bus has been reset.
4090 * Basically, this tries to restart the card from scratch.
4091 * This is a shortened version of the device probe/discovery code,
 4092 * it resembles the first half of the () routine.
4093 */
4094static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4095{
4096 struct net_device *ndev = pci_get_drvdata(pdev);
4097 struct ql_adapter *qdev = netdev_priv(ndev);
4098
4099 if (pci_enable_device(pdev)) {
4100 QPRINTK(qdev, IFUP, ERR,
4101 "Cannot re-enable PCI device after reset.\n");
4102 return PCI_ERS_RESULT_DISCONNECT;
4103 }
4104
4105 pci_set_master(pdev);
4106
4107 netif_carrier_off(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004108 ql_adapter_reset(qdev);
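	/* Only the chip is reset here; the adapter is brought back up
	 * in qlge_io_resume() once the PCI layer reports the slot
	 * usable again.
	 */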
4109
4110 /* Make sure the EEPROM is good */
4111 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4112
4113 if (!is_valid_ether_addr(ndev->perm_addr)) {
4114 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4115 return PCI_ERS_RESULT_DISCONNECT;
4116 }
4117
4118 return PCI_ERS_RESULT_RECOVERED;
4119}
4120
4121static void qlge_io_resume(struct pci_dev *pdev)
4122{
4123 struct net_device *ndev = pci_get_drvdata(pdev);
4124 struct ql_adapter *qdev = netdev_priv(ndev);
4125
4126 pci_set_master(pdev);
4127
4128 if (netif_running(ndev)) {
4129 if (ql_adapter_up(qdev)) {
4130 QPRINTK(qdev, IFUP, ERR,
4131 "Device initialization failed after reset.\n");
4132 return;
4133 }
4134 }
4135
4136 netif_device_attach(ndev);
4137}
4138
4139static struct pci_error_handlers qlge_err_handler = {
4140 .error_detected = qlge_io_error_detected,
4141 .slot_reset = qlge_io_slot_reset,
4142 .resume = qlge_io_resume,
4143};
4144
4145static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4146{
4147 struct net_device *ndev = pci_get_drvdata(pdev);
4148 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004149 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004150
4151 netif_device_detach(ndev);
4152
4153 if (netif_running(ndev)) {
4154 err = ql_adapter_down(qdev);
 4155		if (err)
 4156			return err;
4157 }
4158
4159 err = pci_save_state(pdev);
4160 if (err)
4161 return err;
4162
4163 pci_disable_device(pdev);
4164
4165 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4166
4167 return 0;
4168}
4169
David S. Miller04da2cf2008-09-19 16:14:24 -07004170#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004171static int qlge_resume(struct pci_dev *pdev)
4172{
4173 struct net_device *ndev = pci_get_drvdata(pdev);
4174 struct ql_adapter *qdev = netdev_priv(ndev);
4175 int err;
4176
4177 pci_set_power_state(pdev, PCI_D0);
4178 pci_restore_state(pdev);
4179 err = pci_enable_device(pdev);
4180 if (err) {
4181 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4182 return err;
4183 }
4184 pci_set_master(pdev);
4185
4186 pci_enable_wake(pdev, PCI_D3hot, 0);
4187 pci_enable_wake(pdev, PCI_D3cold, 0);
4188
4189 if (netif_running(ndev)) {
4190 err = ql_adapter_up(qdev);
4191 if (err)
4192 return err;
4193 }
4194
4195 netif_device_attach(ndev);
4196
4197 return 0;
4198}
David S. Miller04da2cf2008-09-19 16:14:24 -07004199#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004200
4201static void qlge_shutdown(struct pci_dev *pdev)
4202{
4203 qlge_suspend(pdev, PMSG_SUSPEND);
4204}
4205
4206static struct pci_driver qlge_driver = {
4207 .name = DRV_NAME,
4208 .id_table = qlge_pci_tbl,
4209 .probe = qlge_probe,
4210 .remove = __devexit_p(qlge_remove),
4211#ifdef CONFIG_PM
4212 .suspend = qlge_suspend,
4213 .resume = qlge_resume,
4214#endif
4215 .shutdown = qlge_shutdown,
4216 .err_handler = &qlge_err_handler
4217};
4218
4219static int __init qlge_init_module(void)
4220{
4221 return pci_register_driver(&qlge_driver);
4222}
4223
4224static void __exit qlge_exit(void)
4225{
4226 pci_unregister_driver(&qlge_driver);
4227}
4228
4229module_init(qlge_init_module);
4230module_exit(qlge_exit);