/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER | */
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

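/* Illustrative usage of the semaphore helpers above; this is the
 * pattern used later in this file (e.g. around flash access):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the shared resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */
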
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

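/* Note: the total poll budget above is UDELAY_COUNT iterations of
 * UDELAY_DELAY microseconds each (constants defined elsewhere in this
 * driver), so a typical call such as
 *
 *	status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *
 * returns 0 as soon as MAC_ADDR_MW is set, -EIO if err_bit is seen,
 * or -ETIMEDOUT once the budget is exhausted.
 */
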
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the DMA mapping if we can't get the semaphore. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
			addr[0], addr[1], addr[2], addr[3],
			addr[4], addr[5]);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
			"address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
		qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
		qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

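/* Note (illustrative): the (bit << 16) half of writes like the ones
 * above recurs throughout this driver (INTR_EN, STS, CFG); the upper
 * 16 bits appear to act as a write mask selecting which of the lower
 * 16 data bits the chip should update, which would explain why
 * "disable" writes the mask half alone and "enable" writes mask | bit.
 */
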
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

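/* Illustrative note on the check above: the flash image is considered
 * valid when all of its 16-bit little-endian words, including the
 * stored checksum word, sum to zero modulo 2^16 -- i.e. the checksum
 * word is presumably written as the two's complement of the sum of
 * the remaining words.
 */
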
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

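/* Illustrative example of the math above: with 4 KB pages and
 * lbq_buf_order == 1 the master block is 8 KB; if lbq_buf_size is
 * 2 KB, ql_get_next_chunk() below carves it into four chunks before
 * a fresh page block has to be allocated.
 */
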
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					pci_unmap_addr(lbq_desc, mapaddr),
					rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			QPRINTK(qdev, DRV, ERR,
				"page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			QPRINTK(qdev, DRV, ERR,
				"PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
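
/* Illustrative walk of the chunk logic above, assuming a 2 KB
 * lbq_buf_size and an 8 KB master block: successive calls hand out
 * offsets 0, 2048, 4096 and 6144.  get_page() takes an extra page
 * reference for every chunk except the last; the final chunk sets
 * last_flag and consumes the block's original reference, letting
 * ql_get_curr_lchunk() unmap the block when that chunk is used.
 */
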
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				QPRINTK(qdev, IFUP, ERR,
					"Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			pci_unmap_addr_set(lbq_desc, mapaddr, map);
			pci_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

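/* Illustrative note on the OAL entry built above: the eighth IOCB
 * segment's len field does not describe payload; it encodes the
 * remaining fragment count times sizeof(struct tx_buf_desc), with the
 * TX_DESC_C (continuation) flag set, which tells the chip how much of
 * the external list to fetch.
 */
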
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

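/* Illustrative arithmetic for the realign above: with QLGE_SB_PAD of
 * 32 and the usual NET_IP_ALIGN of 2, data and tail move back by 30
 * bytes, leaving a 2-byte offset so the IP header that follows the
 * 14-byte Ethernet header lands on a 4-byte boundary.
 */
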
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				pci_unmap_addr(sbq_desc, mapaddr),
				pci_unmap_len(sbq_desc, maplen),
				PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page at offset = %d, "
				"for %d bytes to skb.\n",
				lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames. If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs.  Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

1648/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp)
1652{
1653 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL;
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001657
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659
1660 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1661 if (unlikely(!skb)) {
1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1663 "No skb available, drop packet.\n");
1664 return;
1665 }
1666
Ron Mercera32959c2009-06-09 05:39:27 +00001667 /* Frame error, so drop the packet. */
1668 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1669 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1670 ib_mac_rsp->flags2);
1671 dev_kfree_skb_any(skb);
1672 return;
1673 }
Ron Mercerec33a492009-06-09 05:39:28 +00001674
1675 /* The max framesize filter on this chip is set higher than
1676 * MTU since FCoE uses 2k frames.
1677 */
1678 if (skb->len > ndev->mtu + ETH_HLEN) {
1679 dev_kfree_skb_any(skb);
1680 return;
1681 }
1682
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001683 /* loopback self test for ethtool */
1684 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1685 ql_check_lb_frame(qdev, skb);
1686 dev_kfree_skb_any(skb);
1687 return;
1688 }
1689
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001690 prefetch(skb->data);
1691 skb->dev = ndev;
1692 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1693 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1694 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1696 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1698 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1699 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1700 }
1701 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1702 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1703 }
Ron Mercerd555f592009-03-09 10:59:19 +00001704
Ron Mercerd555f592009-03-09 10:59:19 +00001705 skb->protocol = eth_type_trans(skb, ndev);
1706 skb->ip_summed = CHECKSUM_NONE;
1707
1708 /* If rx checksum is on, and there are no
1709 * csum or frame errors.
1710 */
1711 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001712 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713 /* TCP frame. */
1714 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715 QPRINTK(qdev, RX_STATUS, DEBUG,
1716 "TCP checksum done!\n");
1717 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720 /* Unfragmented ipv4 UDP frame. */
1721 struct iphdr *iph = (struct iphdr *) skb->data;
1722 if (!(iph->frag_off &
1723 cpu_to_be16(IP_MF|IP_OFFSET))) {
1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1725 QPRINTK(qdev, RX_STATUS, DEBUG,
	1726				"UDP checksum done!\n");
1727 }
1728 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001729 }
Ron Mercerd555f592009-03-09 10:59:19 +00001730
Ajit Khapardebcc90f52009-10-07 02:46:09 +00001731 ndev->stats.rx_packets++;
1732 ndev->stats.rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001733 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001734 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1735 if (qdev->vlgrp &&
1736 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1737 (vlan_id != 0))
1738 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1739 vlan_id, skb);
1740 else
1741 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001742 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001743 if (qdev->vlgrp &&
1744 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1745 (vlan_id != 0))
1746 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1747 else
1748 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001749 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001750}
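
/* A note on the rx checksum path above: the driver only trusts the
 * chip's UDP checksum verdict for unfragmented ipv4 frames, hence the
 * recheck of the IP header.  Hypothetical frag_off values:
 *
 *	htons(0x2000)	IP_MF set (first fragment)  -> CHECKSUM_NONE
 *	htons(0x0064)	nonzero fragment offset     -> CHECKSUM_NONE
 *	0		unfragmented -> CHECKSUM_UNNECESSARY
 */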
1751
1752/* Process an outbound completion from an rx ring. */
1753static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1754 struct ob_mac_iocb_rsp *mac_rsp)
1755{
Ajit Khapardebcc90f52009-10-07 02:46:09 +00001756 struct net_device *ndev = qdev->ndev;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001757 struct tx_ring *tx_ring;
1758 struct tx_ring_desc *tx_ring_desc;
1759
1760 QL_DUMP_OB_MAC_RSP(mac_rsp);
1761 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1762 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1763 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ajit Khapardebcc90f52009-10-07 02:46:09 +00001764 ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1765 ndev->stats.tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001766 dev_kfree_skb(tx_ring_desc->skb);
1767 tx_ring_desc->skb = NULL;
1768
1769 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1770 OB_MAC_IOCB_RSP_S |
1771 OB_MAC_IOCB_RSP_L |
1772 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1773 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1774 QPRINTK(qdev, TX_DONE, WARNING,
1775 "Total descriptor length did not match transfer length.\n");
1776 }
1777 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1778 QPRINTK(qdev, TX_DONE, WARNING,
1779 "Frame too short to be legal, not sent.\n");
1780 }
1781 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1782 QPRINTK(qdev, TX_DONE, WARNING,
1783 "Frame too long, but sent anyway.\n");
1784 }
1785 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1786 QPRINTK(qdev, TX_DONE, WARNING,
1787 "PCI backplane error. Frame not sent.\n");
1788 }
1789 }
1790 atomic_inc(&tx_ring->tx_count);
1791}
1792
1793/* Fire up a handler to reset the MPI processor. */
1794void ql_queue_fw_error(struct ql_adapter *qdev)
1795{
Ron Mercer6a473302009-07-02 06:06:12 +00001796 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001797 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1798}
1799
1800void ql_queue_asic_error(struct ql_adapter *qdev)
1801{
Ron Mercer6a473302009-07-02 06:06:12 +00001802 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001803 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08001804 /* Clear adapter up bit to signal the recovery
1805 * process that it shouldn't kill the reset worker
1806 * thread
1807 */
1808 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001809 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1810}
1811
1812static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1813 struct ib_ae_iocb_rsp *ib_ae_rsp)
1814{
1815 switch (ib_ae_rsp->event) {
1816 case MGMT_ERR_EVENT:
1817 QPRINTK(qdev, RX_ERR, ERR,
1818 "Management Processor Fatal Error.\n");
1819 ql_queue_fw_error(qdev);
1820 return;
1821
1822 case CAM_LOOKUP_ERR_EVENT:
1823 QPRINTK(qdev, LINK, ERR,
1824 "Multiple CAM hits lookup occurred.\n");
1825 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1826 ql_queue_asic_error(qdev);
1827 return;
1828
1829 case SOFT_ECC_ERROR_EVENT:
1830 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1831 ql_queue_asic_error(qdev);
1832 break;
1833
1834 case PCI_ERR_ANON_BUF_RD:
1835 QPRINTK(qdev, RX_ERR, ERR,
1836 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1837 ib_ae_rsp->q_id);
1838 ql_queue_asic_error(qdev);
1839 break;
1840
1841 default:
1842 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1843 ib_ae_rsp->event);
1844 ql_queue_asic_error(qdev);
1845 break;
1846 }
1847}
1848
1849static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1850{
1851 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001852 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001853 struct ob_mac_iocb_rsp *net_rsp = NULL;
1854 int count = 0;
1855
Ron Mercer1e213302009-03-09 10:59:21 +00001856 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001857 /* While there are entries in the completion queue. */
1858 while (prod != rx_ring->cnsmr_idx) {
1859
1860 QPRINTK(qdev, RX_STATUS, DEBUG,
	1861			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1862 prod, rx_ring->cnsmr_idx);
1863
1864 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1865 rmb();
1866 switch (net_rsp->opcode) {
1867
1868 case OPCODE_OB_MAC_TSO_IOCB:
1869 case OPCODE_OB_MAC_IOCB:
1870 ql_process_mac_tx_intr(qdev, net_rsp);
1871 break;
1872 default:
1873 QPRINTK(qdev, RX_STATUS, DEBUG,
	1874				"Hit default case, not handled! Ignoring completion, opcode = %x.\n",
1875 net_rsp->opcode);
1876 }
1877 count++;
1878 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001879 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001880 }
1881 ql_write_cq_idx(rx_ring);
	/* net_rsp stays NULL when the completion queue was empty, so
	 * check it before dereferencing it for the tx queue index.
	 */
	if (net_rsp != NULL) {
		tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
		    atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
1893
1894 return count;
1895}
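
/* The wake threshold above with a hypothetical ring size: for
 * wq_len = 256, a stopped tx queue is only restarted once tx_count
 * (the count of free descriptors) climbs back above 256 / 4 = 64,
 * i.e. once the ring is at least 25% empty, which keeps the queue
 * from flapping awake on every single reclaimed descriptor.
 */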
1896
1897static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1898{
1899 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001900 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001901 struct ql_net_rsp_iocb *net_rsp;
1902 int count = 0;
1903
1904 /* While there are entries in the completion queue. */
1905 while (prod != rx_ring->cnsmr_idx) {
1906
1907 QPRINTK(qdev, RX_STATUS, DEBUG,
	1908			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1909 prod, rx_ring->cnsmr_idx);
1910
1911 net_rsp = rx_ring->curr_entry;
1912 rmb();
1913 switch (net_rsp->opcode) {
1914 case OPCODE_IB_MAC_IOCB:
1915 ql_process_mac_rx_intr(qdev, rx_ring,
1916 (struct ib_mac_iocb_rsp *)
1917 net_rsp);
1918 break;
1919
1920 case OPCODE_IB_AE_IOCB:
1921 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1922 net_rsp);
1923 break;
1924 default:
1925 {
1926 QPRINTK(qdev, RX_STATUS, DEBUG,
1927 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1928 net_rsp->opcode);
1929 }
1930 }
1931 count++;
1932 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001933 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001934 if (count == budget)
1935 break;
1936 }
1937 ql_update_buffer_queues(qdev, rx_ring);
1938 ql_write_cq_idx(rx_ring);
1939 return count;
1940}
1941
1942static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1943{
1944 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1945 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00001946 struct rx_ring *trx_ring;
1947 int i, work_done = 0;
1948 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001949
1950 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1951 rx_ring->cq_id);
1952
Ron Mercer39aa8162009-08-27 11:02:11 +00001953 /* Service the TX rings first. They start
1954 * right after the RSS rings. */
1955 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1956 trx_ring = &qdev->rx_ring[i];
1957 /* If this TX completion ring belongs to this vector and
1958 * it's not empty then service it.
1959 */
1960 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1961 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1962 trx_ring->cnsmr_idx)) {
1963 QPRINTK(qdev, INTR, DEBUG,
1964 "%s: Servicing TX completion ring %d.\n",
1965 __func__, trx_ring->cq_id);
1966 ql_clean_outbound_rx_ring(trx_ring);
1967 }
1968 }
1969
1970 /*
1971 * Now service the RSS ring if it's active.
1972 */
1973 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1974 rx_ring->cnsmr_idx) {
1975 QPRINTK(qdev, INTR, DEBUG,
1976 "%s: Servicing RX completion ring %d.\n",
1977 __func__, rx_ring->cq_id);
1978 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1979 }
1980
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001981 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001982 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001983 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1984 }
1985 return work_done;
1986}
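
/* The tail of the poll routine above follows the usual NAPI
 * contract: only when fewer than "budget" frames were found may it
 * call napi_complete() and re-arm the interrupt.  With the weight of
 * 64 passed to netif_napi_add() in ql_start_rx_ring() below, a ring
 * holding 100 completions drains in two polls: the first returns 64
 * with the interrupt still masked, the second returns 36 and
 * re-enables it.
 */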
1987
Ron Mercer01e6b952009-10-30 12:13:34 +00001988static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001989{
1990 struct ql_adapter *qdev = netdev_priv(ndev);
1991
1992 qdev->vlgrp = grp;
1993 if (grp) {
1994 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1995 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1996 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1997 } else {
1998 QPRINTK(qdev, IFUP, DEBUG,
1999 "Turning off VLAN in NIC_RCV_CFG.\n");
2000 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2001 }
2002}
2003
Ron Mercer01e6b952009-10-30 12:13:34 +00002004static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002005{
2006 struct ql_adapter *qdev = netdev_priv(ndev);
2007 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002008 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002009
Ron Mercercc288f52009-02-23 10:42:14 +00002010 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2011 if (status)
2012 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002013 if (ql_set_mac_addr_reg
2014 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2015 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2016 }
Ron Mercercc288f52009-02-23 10:42:14 +00002017 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002018}
2019
Ron Mercer01e6b952009-10-30 12:13:34 +00002020static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002021{
2022 struct ql_adapter *qdev = netdev_priv(ndev);
2023 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002024 int status;
2025
2026 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2027 if (status)
2028 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002029
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002030 if (ql_set_mac_addr_reg
2031 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2032 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2033 }
Ron Mercercc288f52009-02-23 10:42:14 +00002034 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002035
2036}
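
/* Both VLAN entry points above program the same MAC_ADDR_TYPE_VLAN
 * CAM slot, indexed by vid; only the enable bit differs.  Sketch for
 * a hypothetical vid of 5:
 *
 *	add:  enable_bit = MAC_ADDR_E;
 *	kill: enable_bit = 0;
 *	ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
 *			    MAC_ADDR_TYPE_VLAN, 5);
 */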
2037
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002038/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2039static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2040{
2041 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002042 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002043 return IRQ_HANDLED;
2044}
2045
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002046/* This handles a fatal error, MPI activity, and the default
2047 * rx_ring in an MSI-X multiple vector environment.
	2048 * In an MSI/Legacy environment it also processes the rest of
2049 * the rx_rings.
2050 */
2051static irqreturn_t qlge_isr(int irq, void *dev_id)
2052{
2053 struct rx_ring *rx_ring = dev_id;
2054 struct ql_adapter *qdev = rx_ring->qdev;
2055 struct intr_context *intr_context = &qdev->intr_context[0];
2056 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002057 int work_done = 0;
2058
Ron Mercerbb0d2152008-10-20 10:30:26 -07002059 spin_lock(&qdev->hw_lock);
2060 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2061 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2062 spin_unlock(&qdev->hw_lock);
2063 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002064 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002065 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002066
Ron Mercerbb0d2152008-10-20 10:30:26 -07002067 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002068
2069 /*
2070 * Check for fatal error.
2071 */
2072 if (var & STS_FE) {
2073 ql_queue_asic_error(qdev);
2074 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2075 var = ql_read32(qdev, ERR_STS);
2076 QPRINTK(qdev, INTR, ERR,
2077 "Resetting chip. Error Status Register = 0x%x\n", var);
2078 return IRQ_HANDLED;
2079 }
2080
2081 /*
2082 * Check MPI processor activity.
2083 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002084 if ((var & STS_PI) &&
2085 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002086 /*
2087 * We've got an async event or mailbox completion.
2088 * Handle it and clear the source of the interrupt.
2089 */
2090 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2091 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002092 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2093 queue_delayed_work_on(smp_processor_id(),
2094 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002095 work_done++;
2096 }
2097
2098 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002099 * Get the bit-mask that shows the active queues for this
2100 * pass. Compare it to the queues that this irq services
2101 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002102 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002103 var = ql_read32(qdev, ISR1);
2104 if (var & intr_context->irq_mask) {
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002105 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002106 "Waking handler for rx_ring[0].\n");
2107 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002108 napi_schedule(&rx_ring->napi);
2109 work_done++;
2110 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002111 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002112 return work_done ? IRQ_HANDLED : IRQ_NONE;
2113}
2114
2115static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2116{
2117
2118 if (skb_is_gso(skb)) {
2119 int err;
2120 if (skb_header_cloned(skb)) {
2121 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2122 if (err)
2123 return err;
2124 }
2125
2126 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2127 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2128 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2129 mac_iocb_ptr->total_hdrs_len =
2130 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2131 mac_iocb_ptr->net_trans_offset =
2132 cpu_to_le16(skb_network_offset(skb) |
2133 skb_transport_offset(skb)
2134 << OB_MAC_TRANSPORT_HDR_SHIFT);
2135 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2136 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2137 if (likely(skb->protocol == htons(ETH_P_IP))) {
2138 struct iphdr *iph = ip_hdr(skb);
2139 iph->check = 0;
2140 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2141 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2142 iph->daddr, 0,
2143 IPPROTO_TCP,
2144 0);
2145 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2146 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2147 tcp_hdr(skb)->check =
2148 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2149 &ipv6_hdr(skb)->daddr,
2150 0, IPPROTO_TCP, 0);
2151 }
2152 return 1;
2153 }
2154 return 0;
2155}
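
/* The checksum seeding in ql_tso() above replaces the stack's
 * checksum with a pseudo-header sum computed over the addresses and
 * protocol only, with the length passed as 0:
 *
 *	tcp_hdr(skb)->check =
 *		~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
 *
 * The length is left out because the chip re-segments the payload
 * and must fold a fresh per-segment length into each checksum.
 */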
2156
2157static void ql_hw_csum_setup(struct sk_buff *skb,
2158 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2159{
2160 int len;
2161 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002162 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002163 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2164 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2165 mac_iocb_ptr->net_trans_offset =
2166 cpu_to_le16(skb_network_offset(skb) |
2167 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2168
2169 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2170 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2171 if (likely(iph->protocol == IPPROTO_TCP)) {
2172 check = &(tcp_hdr(skb)->check);
2173 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2174 mac_iocb_ptr->total_hdrs_len =
2175 cpu_to_le16(skb_transport_offset(skb) +
2176 (tcp_hdr(skb)->doff << 2));
2177 } else {
2178 check = &(udp_hdr(skb)->check);
2179 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2180 mac_iocb_ptr->total_hdrs_len =
2181 cpu_to_le16(skb_transport_offset(skb) +
2182 sizeof(struct udphdr));
2183 }
2184 *check = ~csum_tcpudp_magic(iph->saddr,
2185 iph->daddr, len, iph->protocol, 0);
2186}
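
/* Worked numbers for the length math in ql_hw_csum_setup() above
 * (illustrative, not from a captured frame): a 60 byte TCP/IP packet
 * with no IP options has iph->ihl = 5, i.e. a 5 << 2 = 20 byte
 * header, so
 *
 *	len = ntohs(iph->tot_len) - (iph->ihl << 2) = 60 - 20 = 40
 *
 * which is the TCP header plus payload length fed to
 * csum_tcpudp_magic() for the pseudo-header seed.
 */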
2187
Stephen Hemminger613573252009-08-31 19:50:58 +00002188static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002189{
2190 struct tx_ring_desc *tx_ring_desc;
2191 struct ob_mac_iocb_req *mac_iocb_ptr;
2192 struct ql_adapter *qdev = netdev_priv(ndev);
2193 int tso;
2194 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002195 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002196
2197 tx_ring = &qdev->tx_ring[tx_ring_idx];
2198
Ron Mercer74c50b42009-03-09 10:59:27 +00002199 if (skb_padto(skb, ETH_ZLEN))
2200 return NETDEV_TX_OK;
2201
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002202 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2203 QPRINTK(qdev, TX_QUEUED, INFO,
	2204			"%s: shutting down tx queue %d due to lack of resources.\n",
2205 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002206 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002207 atomic_inc(&tx_ring->queue_stopped);
2208 return NETDEV_TX_BUSY;
2209 }
2210 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2211 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002212 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002213
2214 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2215 mac_iocb_ptr->tid = tx_ring_desc->index;
2216 /* We use the upper 32-bits to store the tx queue for this IO.
2217 * When we get the completion we can use it to establish the context.
2218 */
2219 mac_iocb_ptr->txq_idx = tx_ring_idx;
2220 tx_ring_desc->skb = skb;
2221
2222 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2223
2224 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2225 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2226 vlan_tx_tag_get(skb));
2227 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2228 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2229 }
2230 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2231 if (tso < 0) {
2232 dev_kfree_skb_any(skb);
2233 return NETDEV_TX_OK;
2234 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2235 ql_hw_csum_setup(skb,
2236 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2237 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002238 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2239 NETDEV_TX_OK) {
2240 QPRINTK(qdev, TX_QUEUED, ERR,
2241 "Could not map the segments.\n");
2242 return NETDEV_TX_BUSY;
2243 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002244 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2245 tx_ring->prod_idx++;
2246 if (tx_ring->prod_idx == tx_ring->wq_len)
2247 tx_ring->prod_idx = 0;
2248 wmb();
2249
2250 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002251 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2252 tx_ring->prod_idx, skb->len);
2253
2254 atomic_dec(&tx_ring->tx_count);
2255 return NETDEV_TX_OK;
2256}
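
/* The producer index handling at the end of qlge_send() is a plain
 * ring wrap; with a hypothetical wq_len of 128:
 *
 *	tx_ring->prod_idx++;			127 -> 128
 *	if (tx_ring->prod_idx == tx_ring->wq_len)
 *		tx_ring->prod_idx = 0;		128 -> 0
 *
 * The wmb() orders the IOCB writes ahead of the doorbell write so
 * the chip never fetches a half-built descriptor.
 */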
2257
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002258
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002259static void ql_free_shadow_space(struct ql_adapter *qdev)
2260{
2261 if (qdev->rx_ring_shadow_reg_area) {
2262 pci_free_consistent(qdev->pdev,
2263 PAGE_SIZE,
2264 qdev->rx_ring_shadow_reg_area,
2265 qdev->rx_ring_shadow_reg_dma);
2266 qdev->rx_ring_shadow_reg_area = NULL;
2267 }
2268 if (qdev->tx_ring_shadow_reg_area) {
2269 pci_free_consistent(qdev->pdev,
2270 PAGE_SIZE,
2271 qdev->tx_ring_shadow_reg_area,
2272 qdev->tx_ring_shadow_reg_dma);
2273 qdev->tx_ring_shadow_reg_area = NULL;
2274 }
2275}
2276
2277static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2278{
2279 qdev->rx_ring_shadow_reg_area =
2280 pci_alloc_consistent(qdev->pdev,
2281 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2282 if (qdev->rx_ring_shadow_reg_area == NULL) {
2283 QPRINTK(qdev, IFUP, ERR,
2284 "Allocation of RX shadow space failed.\n");
2285 return -ENOMEM;
2286 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002287 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002288 qdev->tx_ring_shadow_reg_area =
2289 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2290 &qdev->tx_ring_shadow_reg_dma);
2291 if (qdev->tx_ring_shadow_reg_area == NULL) {
2292 QPRINTK(qdev, IFUP, ERR,
2293 "Allocation of TX shadow space failed.\n");
2294 goto err_wqp_sh_area;
2295 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002296 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002297 return 0;
2298
2299err_wqp_sh_area:
2300 pci_free_consistent(qdev->pdev,
2301 PAGE_SIZE,
2302 qdev->rx_ring_shadow_reg_area,
2303 qdev->rx_ring_shadow_reg_dma);
2304 return -ENOMEM;
2305}
2306
2307static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2308{
2309 struct tx_ring_desc *tx_ring_desc;
2310 int i;
2311 struct ob_mac_iocb_req *mac_iocb_ptr;
2312
2313 mac_iocb_ptr = tx_ring->wq_base;
2314 tx_ring_desc = tx_ring->q;
2315 for (i = 0; i < tx_ring->wq_len; i++) {
2316 tx_ring_desc->index = i;
2317 tx_ring_desc->skb = NULL;
2318 tx_ring_desc->queue_entry = mac_iocb_ptr;
2319 mac_iocb_ptr++;
2320 tx_ring_desc++;
2321 }
2322 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2323 atomic_set(&tx_ring->queue_stopped, 0);
2324}
2325
2326static void ql_free_tx_resources(struct ql_adapter *qdev,
2327 struct tx_ring *tx_ring)
2328{
2329 if (tx_ring->wq_base) {
2330 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2331 tx_ring->wq_base, tx_ring->wq_base_dma);
2332 tx_ring->wq_base = NULL;
2333 }
2334 kfree(tx_ring->q);
2335 tx_ring->q = NULL;
2336}
2337
2338static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2339 struct tx_ring *tx_ring)
2340{
2341 tx_ring->wq_base =
2342 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2343 &tx_ring->wq_base_dma);
2344
2345 if ((tx_ring->wq_base == NULL)
Ron Mercer88c55e32009-06-10 15:49:33 +00002346 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002347 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2348 return -ENOMEM;
2349 }
2350 tx_ring->q =
2351 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2352 if (tx_ring->q == NULL)
2353 goto err;
2354
2355 return 0;
2356err:
2357 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2358 tx_ring->wq_base, tx_ring->wq_base_dma);
2359 return -ENOMEM;
2360}
2361
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002362static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002363{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002364 struct bq_desc *lbq_desc;
2365
Ron Mercer7c734352009-10-19 03:32:19 +00002366 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002367
Ron Mercer7c734352009-10-19 03:32:19 +00002368 curr_idx = rx_ring->lbq_curr_idx;
2369 clean_idx = rx_ring->lbq_clean_idx;
2370 while (curr_idx != clean_idx) {
2371 lbq_desc = &rx_ring->lbq[curr_idx];
2372
2373 if (lbq_desc->p.pg_chunk.last_flag) {
2374 pci_unmap_page(qdev->pdev,
2375 lbq_desc->p.pg_chunk.map,
2376 ql_lbq_block_size(qdev),
2377 PCI_DMA_FROMDEVICE);
2378 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002379 }
Ron Mercer7c734352009-10-19 03:32:19 +00002380
2381 put_page(lbq_desc->p.pg_chunk.page);
2382 lbq_desc->p.pg_chunk.page = NULL;
2383
2384 if (++curr_idx == rx_ring->lbq_len)
2385 curr_idx = 0;
2386
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002387 }
2388}
2389
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002390static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002391{
2392 int i;
2393 struct bq_desc *sbq_desc;
2394
2395 for (i = 0; i < rx_ring->sbq_len; i++) {
2396 sbq_desc = &rx_ring->sbq[i];
2397 if (sbq_desc == NULL) {
2398 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2399 return;
2400 }
2401 if (sbq_desc->p.skb) {
2402 pci_unmap_single(qdev->pdev,
2403 pci_unmap_addr(sbq_desc, mapaddr),
2404 pci_unmap_len(sbq_desc, maplen),
2405 PCI_DMA_FROMDEVICE);
2406 dev_kfree_skb(sbq_desc->p.skb);
2407 sbq_desc->p.skb = NULL;
2408 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002409 }
2410}
2411
Ron Mercer4545a3f2009-02-23 10:42:17 +00002412/* Free all large and small rx buffers associated
2413 * with the completion queues for this device.
2414 */
2415static void ql_free_rx_buffers(struct ql_adapter *qdev)
2416{
2417 int i;
2418 struct rx_ring *rx_ring;
2419
2420 for (i = 0; i < qdev->rx_ring_count; i++) {
2421 rx_ring = &qdev->rx_ring[i];
2422 if (rx_ring->lbq)
2423 ql_free_lbq_buffers(qdev, rx_ring);
2424 if (rx_ring->sbq)
2425 ql_free_sbq_buffers(qdev, rx_ring);
2426 }
2427}
2428
2429static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2430{
2431 struct rx_ring *rx_ring;
2432 int i;
2433
2434 for (i = 0; i < qdev->rx_ring_count; i++) {
2435 rx_ring = &qdev->rx_ring[i];
2436 if (rx_ring->type != TX_Q)
2437 ql_update_buffer_queues(qdev, rx_ring);
2438 }
2439}
2440
2441static void ql_init_lbq_ring(struct ql_adapter *qdev,
2442 struct rx_ring *rx_ring)
2443{
2444 int i;
2445 struct bq_desc *lbq_desc;
2446 __le64 *bq = rx_ring->lbq_base;
2447
2448 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2449 for (i = 0; i < rx_ring->lbq_len; i++) {
2450 lbq_desc = &rx_ring->lbq[i];
2451 memset(lbq_desc, 0, sizeof(*lbq_desc));
2452 lbq_desc->index = i;
2453 lbq_desc->addr = bq;
2454 bq++;
2455 }
2456}
2457
2458static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002459 struct rx_ring *rx_ring)
2460{
2461 int i;
2462 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002463 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002464
Ron Mercer4545a3f2009-02-23 10:42:17 +00002465 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002466 for (i = 0; i < rx_ring->sbq_len; i++) {
2467 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002468 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002469 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002470 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002471 bq++;
2472 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002473}
2474
2475static void ql_free_rx_resources(struct ql_adapter *qdev,
2476 struct rx_ring *rx_ring)
2477{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002478 /* Free the small buffer queue. */
2479 if (rx_ring->sbq_base) {
2480 pci_free_consistent(qdev->pdev,
2481 rx_ring->sbq_size,
2482 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2483 rx_ring->sbq_base = NULL;
2484 }
2485
2486 /* Free the small buffer queue control blocks. */
2487 kfree(rx_ring->sbq);
2488 rx_ring->sbq = NULL;
2489
2490 /* Free the large buffer queue. */
2491 if (rx_ring->lbq_base) {
2492 pci_free_consistent(qdev->pdev,
2493 rx_ring->lbq_size,
2494 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2495 rx_ring->lbq_base = NULL;
2496 }
2497
2498 /* Free the large buffer queue control blocks. */
2499 kfree(rx_ring->lbq);
2500 rx_ring->lbq = NULL;
2501
2502 /* Free the rx queue. */
2503 if (rx_ring->cq_base) {
2504 pci_free_consistent(qdev->pdev,
2505 rx_ring->cq_size,
2506 rx_ring->cq_base, rx_ring->cq_base_dma);
2507 rx_ring->cq_base = NULL;
2508 }
2509}
2510
	2511/* Allocate queues and buffers for this completion queue based
2512 * on the values in the parameter structure. */
2513static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2514 struct rx_ring *rx_ring)
2515{
2516
2517 /*
2518 * Allocate the completion queue for this rx_ring.
2519 */
2520 rx_ring->cq_base =
2521 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2522 &rx_ring->cq_base_dma);
2523
2524 if (rx_ring->cq_base == NULL) {
2525 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2526 return -ENOMEM;
2527 }
2528
2529 if (rx_ring->sbq_len) {
2530 /*
2531 * Allocate small buffer queue.
2532 */
2533 rx_ring->sbq_base =
2534 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2535 &rx_ring->sbq_base_dma);
2536
2537 if (rx_ring->sbq_base == NULL) {
2538 QPRINTK(qdev, IFUP, ERR,
2539 "Small buffer queue allocation failed.\n");
2540 goto err_mem;
2541 }
2542
2543 /*
2544 * Allocate small buffer queue control blocks.
2545 */
2546 rx_ring->sbq =
2547 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2548 GFP_KERNEL);
2549 if (rx_ring->sbq == NULL) {
2550 QPRINTK(qdev, IFUP, ERR,
2551 "Small buffer queue control block allocation failed.\n");
2552 goto err_mem;
2553 }
2554
Ron Mercer4545a3f2009-02-23 10:42:17 +00002555 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002556 }
2557
2558 if (rx_ring->lbq_len) {
2559 /*
2560 * Allocate large buffer queue.
2561 */
2562 rx_ring->lbq_base =
2563 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2564 &rx_ring->lbq_base_dma);
2565
2566 if (rx_ring->lbq_base == NULL) {
2567 QPRINTK(qdev, IFUP, ERR,
2568 "Large buffer queue allocation failed.\n");
2569 goto err_mem;
2570 }
2571 /*
2572 * Allocate large buffer queue control blocks.
2573 */
2574 rx_ring->lbq =
2575 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2576 GFP_KERNEL);
2577 if (rx_ring->lbq == NULL) {
2578 QPRINTK(qdev, IFUP, ERR,
2579 "Large buffer queue control block allocation failed.\n");
2580 goto err_mem;
2581 }
2582
Ron Mercer4545a3f2009-02-23 10:42:17 +00002583 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002584 }
2585
2586 return 0;
2587
2588err_mem:
2589 ql_free_rx_resources(qdev, rx_ring);
2590 return -ENOMEM;
2591}
2592
2593static void ql_tx_ring_clean(struct ql_adapter *qdev)
2594{
2595 struct tx_ring *tx_ring;
2596 struct tx_ring_desc *tx_ring_desc;
2597 int i, j;
2598
2599 /*
2600 * Loop through all queues and free
2601 * any resources.
2602 */
2603 for (j = 0; j < qdev->tx_ring_count; j++) {
2604 tx_ring = &qdev->tx_ring[j];
2605 for (i = 0; i < tx_ring->wq_len; i++) {
2606 tx_ring_desc = &tx_ring->q[i];
2607 if (tx_ring_desc && tx_ring_desc->skb) {
2608 QPRINTK(qdev, IFDOWN, ERR,
2609 "Freeing lost SKB %p, from queue %d, index %d.\n",
2610 tx_ring_desc->skb, j,
2611 tx_ring_desc->index);
2612 ql_unmap_send(qdev, tx_ring_desc,
2613 tx_ring_desc->map_cnt);
2614 dev_kfree_skb(tx_ring_desc->skb);
2615 tx_ring_desc->skb = NULL;
2616 }
2617 }
2618 }
2619}
2620
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002621static void ql_free_mem_resources(struct ql_adapter *qdev)
2622{
2623 int i;
2624
2625 for (i = 0; i < qdev->tx_ring_count; i++)
2626 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2627 for (i = 0; i < qdev->rx_ring_count; i++)
2628 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2629 ql_free_shadow_space(qdev);
2630}
2631
2632static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2633{
2634 int i;
2635
2636 /* Allocate space for our shadow registers and such. */
2637 if (ql_alloc_shadow_space(qdev))
2638 return -ENOMEM;
2639
2640 for (i = 0; i < qdev->rx_ring_count; i++) {
2641 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2642 QPRINTK(qdev, IFUP, ERR,
2643 "RX resource allocation failed.\n");
2644 goto err_mem;
2645 }
2646 }
2647 /* Allocate tx queue resources */
2648 for (i = 0; i < qdev->tx_ring_count; i++) {
2649 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2650 QPRINTK(qdev, IFUP, ERR,
2651 "TX resource allocation failed.\n");
2652 goto err_mem;
2653 }
2654 }
2655 return 0;
2656
2657err_mem:
2658 ql_free_mem_resources(qdev);
2659 return -ENOMEM;
2660}
2661
2662/* Set up the rx ring control block and pass it to the chip.
2663 * The control block is defined as
2664 * "Completion Queue Initialization Control Block", or cqicb.
2665 */
2666static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2667{
2668 struct cqicb *cqicb = &rx_ring->cqicb;
2669 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002670 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002671 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002672 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002673 void __iomem *doorbell_area =
2674 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2675 int err = 0;
2676 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002677 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002678 __le64 *base_indirect_ptr;
2679 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002680
2681 /* Set up the shadow registers for this ring. */
2682 rx_ring->prod_idx_sh_reg = shadow_reg;
2683 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00002684 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002685 shadow_reg += sizeof(u64);
2686 shadow_reg_dma += sizeof(u64);
2687 rx_ring->lbq_base_indirect = shadow_reg;
2688 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002689 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2690 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002691 rx_ring->sbq_base_indirect = shadow_reg;
2692 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2693
2694 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002695 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002696 rx_ring->cnsmr_idx = 0;
2697 rx_ring->curr_entry = rx_ring->cq_base;
2698
2699 /* PCI doorbell mem area + 0x04 for valid register */
2700 rx_ring->valid_db_reg = doorbell_area + 0x04;
2701
2702 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002703 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002704
2705 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002706 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002707
2708 memset((void *)cqicb, 0, sizeof(struct cqicb));
2709 cqicb->msix_vect = rx_ring->irq;
2710
Ron Mercer459caf52009-01-04 17:08:11 -08002711 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2712 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002713
Ron Mercer97345522009-01-09 11:31:50 +00002714 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002715
Ron Mercer97345522009-01-09 11:31:50 +00002716 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002717
2718 /*
2719 * Set up the control block load flags.
2720 */
2721 cqicb->flags = FLAGS_LC | /* Load queue base address */
2722 FLAGS_LV | /* Load MSI-X vector */
2723 FLAGS_LI; /* Load irq delay values */
2724 if (rx_ring->lbq_len) {
2725 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002726 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002727 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2728 page_entries = 0;
2729 do {
2730 *base_indirect_ptr = cpu_to_le64(tmp);
2731 tmp += DB_PAGE_SIZE;
2732 base_indirect_ptr++;
2733 page_entries++;
2734 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002735 cqicb->lbq_addr =
2736 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002737 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2738 (u16) rx_ring->lbq_buf_size;
2739 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2740 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2741 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002742 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002743 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002744 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002745 rx_ring->lbq_clean_idx = 0;
2746 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002747 }
2748 if (rx_ring->sbq_len) {
2749 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002750 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002751 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2752 page_entries = 0;
2753 do {
2754 *base_indirect_ptr = cpu_to_le64(tmp);
2755 tmp += DB_PAGE_SIZE;
2756 base_indirect_ptr++;
2757 page_entries++;
2758 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002759 cqicb->sbq_addr =
2760 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002761 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00002762 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08002763 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2764 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002765 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002766 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002767 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002768 rx_ring->sbq_clean_idx = 0;
2769 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002770 }
2771 switch (rx_ring->type) {
2772 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002773 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2774 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2775 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002776 case RX_Q:
2777 /* Inbound completion handling rx_rings run in
2778 * separate NAPI contexts.
2779 */
2780 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2781 64);
2782 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2783 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2784 break;
2785 default:
2786 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2787 rx_ring->type);
2788 }
Ron Mercer49740972009-02-26 10:08:36 +00002789 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002790 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2791 CFG_LCQ, rx_ring->cq_id);
2792 if (err) {
2793 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2794 return err;
2795 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002796 return err;
2797}
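
/* The "== 65536 ? 0" tests in ql_start_rx_ring() above squeeze queue
 * lengths into 16 bit CQICB fields: 65536 does not fit in a u16, so
 * the hardware reads a length of 0 as the 65536 maximum.
 * Hypothetical encodings:
 *
 *	cq_len  = 1024  -> bq_len = 1024
 *	lbq_len = 65536 -> bq_len = 0	(interpreted as 64K)
 */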
2798
2799static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2800{
2801 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2802 void __iomem *doorbell_area =
2803 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2804 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2805 (tx_ring->wq_id * sizeof(u64));
2806 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2807 (tx_ring->wq_id * sizeof(u64));
2808 int err = 0;
2809
2810 /*
2811 * Assign doorbell registers for this tx_ring.
2812 */
2813 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002814 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002815 tx_ring->prod_idx = 0;
2816 /* TX PCI doorbell mem area + 0x04 */
2817 tx_ring->valid_db_reg = doorbell_area + 0x04;
2818
2819 /*
2820 * Assign shadow registers for this tx_ring.
2821 */
2822 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2823 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2824
2825 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2826 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2827 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2828 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2829 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002830 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002831
Ron Mercer97345522009-01-09 11:31:50 +00002832 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002833
2834 ql_init_tx_ring(qdev, tx_ring);
2835
Ron Mercere3324712009-07-02 06:06:13 +00002836 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002837 (u16) tx_ring->wq_id);
2838 if (err) {
2839 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2840 return err;
2841 }
Ron Mercer49740972009-02-26 10:08:36 +00002842 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002843 return err;
2844}
2845
2846static void ql_disable_msix(struct ql_adapter *qdev)
2847{
2848 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2849 pci_disable_msix(qdev->pdev);
2850 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2851 kfree(qdev->msi_x_entry);
2852 qdev->msi_x_entry = NULL;
2853 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2854 pci_disable_msi(qdev->pdev);
2855 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2856 }
2857}
2858
Ron Mercera4ab6132009-08-27 11:02:10 +00002859/* We start by trying to get the number of vectors
2860 * stored in qdev->intr_count. If we don't get that
2861 * many then we reduce the count and try again.
2862 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002863static void ql_enable_msix(struct ql_adapter *qdev)
2864{
Ron Mercera4ab6132009-08-27 11:02:10 +00002865 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002866
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002867 /* Get the MSIX vectors. */
2868 if (irq_type == MSIX_IRQ) {
2869 /* Try to alloc space for the msix struct,
2870 * if it fails then go to MSI/legacy.
2871 */
Ron Mercera4ab6132009-08-27 11:02:10 +00002872 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002873 sizeof(struct msix_entry),
2874 GFP_KERNEL);
2875 if (!qdev->msi_x_entry) {
2876 irq_type = MSI_IRQ;
2877 goto msi;
2878 }
2879
Ron Mercera4ab6132009-08-27 11:02:10 +00002880 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002881 qdev->msi_x_entry[i].entry = i;
2882
Ron Mercera4ab6132009-08-27 11:02:10 +00002883 /* Loop to get our vectors. We start with
2884 * what we want and settle for what we get.
2885 */
2886 do {
2887 err = pci_enable_msix(qdev->pdev,
2888 qdev->msi_x_entry, qdev->intr_count);
2889 if (err > 0)
2890 qdev->intr_count = err;
2891 } while (err > 0);
2892
2893 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002894 kfree(qdev->msi_x_entry);
2895 qdev->msi_x_entry = NULL;
2896 QPRINTK(qdev, IFUP, WARNING,
2897 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00002898 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002899 irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00002900 } else if (err == 0) {
2901 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2902 QPRINTK(qdev, IFUP, INFO,
2903 "MSI-X Enabled, got %d vectors.\n",
2904 qdev->intr_count);
2905 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002906 }
2907 }
2908msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00002909 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002910 if (irq_type == MSI_IRQ) {
2911 if (!pci_enable_msi(qdev->pdev)) {
2912 set_bit(QL_MSI_ENABLED, &qdev->flags);
2913 QPRINTK(qdev, IFUP, INFO,
2914 "Running with MSI interrupts.\n");
2915 return;
2916 }
2917 }
2918 irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002919 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2920}
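
/* The pci_enable_msix() loop above relies on its "positive return"
 * convention: a return > 0 is the vector count the platform could
 * actually grant, so the driver shrinks its request and retries.  A
 * hypothetical negotiation:
 *
 *	request 8 -> returns 4	(retry with intr_count = 4)
 *	request 4 -> returns 0	(success, MSI-X enabled)
 *
 * A negative return at any point falls back to MSI, then legacy INTx.
 */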
2921
Ron Mercer39aa8162009-08-27 11:02:11 +00002922/* Each vector services 1 RSS ring and 1 or more
2923 * TX completion rings. This function loops through
2924 * the TX completion rings and assigns the vector that
2925 * will service it. An example would be if there are
2926 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2927 * This would mean that vector 0 would service RSS ring 0
	2928 * and TX completion rings 0,1,2 and 3. Vector 1 would
2929 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2930 */
2931static void ql_set_tx_vect(struct ql_adapter *qdev)
2932{
2933 int i, j, vect;
2934 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2935
2936 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
	2937		/* Assign irq vectors to the TX completion rx_rings. */
2938 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2939 i < qdev->rx_ring_count; i++) {
2940 if (j == tx_rings_per_vector) {
2941 vect++;
2942 j = 0;
2943 }
2944 qdev->rx_ring[i].irq = vect;
2945 j++;
2946 }
2947 } else {
2948 /* For single vector all rings have an irq
2949 * of zero.
2950 */
2951 for (i = 0; i < qdev->rx_ring_count; i++)
2952 qdev->rx_ring[i].irq = 0;
2953 }
2954}
2955
2956/* Set the interrupt mask for this vector. Each vector
2957 * will service 1 RSS ring and 1 or more TX completion
2958 * rings. This function sets up a bit mask per vector
2959 * that indicates which rings it services.
2960 */
2961static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2962{
2963 int j, vect = ctx->intr;
2964 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2965
2966 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2967 /* Add the RSS ring serviced by this vector
2968 * to the mask.
2969 */
2970 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2971 /* Add the TX ring(s) serviced by this vector
2972 * to the mask. */
2973 for (j = 0; j < tx_rings_per_vector; j++) {
2974 ctx->irq_mask |=
2975 (1 << qdev->rx_ring[qdev->rss_ring_count +
2976 (vect * tx_rings_per_vector) + j].cq_id);
2977 }
2978 } else {
2979 /* For single vector we just shift each queue's
2980 * ID into the mask.
2981 */
2982 for (j = 0; j < qdev->rx_ring_count; j++)
2983 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2984 }
2985}
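
/* A worked mask for a hypothetical layout of 2 MSI-X vectors,
 * 2 RSS rings (cq_ids 0-1) and 8 TX completion rings (cq_ids 2-9),
 * giving tx_rings_per_vector = 4:
 *
 *	vector 0: irq_mask = 1<<0 | 1<<2 | 1<<3 | 1<<4 | 1<<5 = 0x03d
 *	vector 1: irq_mask = 1<<1 | 1<<6 | 1<<7 | 1<<8 | 1<<9 = 0x3c2
 *
 * qlge_isr() and ql_napi_poll_msix() later AND this mask against
 * ISR1 or a ring's cq_id bit to pick the completion queues to
 * service.
 */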
2986
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002987/*
2988 * Here we build the intr_context structures based on
2989 * our rx_ring count and intr vector count.
2990 * The intr_context structure is used to hook each vector
2991 * to possibly different handlers.
2992 */
2993static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2994{
2995 int i = 0;
2996 struct intr_context *intr_context = &qdev->intr_context[0];
2997
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002998 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
	2999		/* Each rx_ring has its
3000 * own intr_context since we have separate
3001 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003002 */
3003 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3004 qdev->rx_ring[i].irq = i;
3005 intr_context->intr = i;
3006 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003007 /* Set up this vector's bit-mask that indicates
3008 * which queues it services.
3009 */
3010 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003011 /*
	3012			 * We set up each vector's enable/disable/read bits so
	3013			 * there are no bit/mask calculations in the critical path.
3014 */
3015 intr_context->intr_en_mask =
3016 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3017 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3018 | i;
3019 intr_context->intr_dis_mask =
3020 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3021 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3022 INTR_EN_IHD | i;
3023 intr_context->intr_read_mask =
3024 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3025 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3026 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003027 if (i == 0) {
3028 /* The first vector/queue handles
3029 * broadcast/multicast, fatal errors,
3030 * and firmware events. This in addition
3031 * to normal inbound NAPI processing.
3032 */
3033 intr_context->handler = qlge_isr;
3034 sprintf(intr_context->name, "%s-rx-%d",
3035 qdev->ndev->name, i);
3036 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003037 /*
3038 * Inbound queues handle unicast frames only.
3039 */
3040 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003041 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003042 qdev->ndev->name, i);
3043 }
3044 }
3045 } else {
3046 /*
3047 * All rx_rings use the same intr_context since
3048 * there is only one vector.
3049 */
3050 intr_context->intr = 0;
3051 intr_context->qdev = qdev;
3052 /*
	3053		 * We set up each vector's enable/disable/read bits so
	3054		 * there are no bit/mask calculations in the critical path.
3055 */
3056 intr_context->intr_en_mask =
3057 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3058 intr_context->intr_dis_mask =
3059 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3060 INTR_EN_TYPE_DISABLE;
3061 intr_context->intr_read_mask =
3062 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3063 /*
3064 * Single interrupt means one handler for all rings.
3065 */
3066 intr_context->handler = qlge_isr;
3067 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003068 /* Set up this vector's bit-mask that indicates
3069 * which queues it services. In this case there is
3070 * a single vector so it will service all RSS and
3071 * TX completion rings.
3072 */
3073 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003074 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003075 /* Tell the TX completion rings which MSIx vector
3076 * they will be using.
3077 */
3078 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003079}
3080
3081static void ql_free_irq(struct ql_adapter *qdev)
3082{
3083 int i;
3084 struct intr_context *intr_context = &qdev->intr_context[0];
3085
3086 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3087 if (intr_context->hooked) {
3088 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3089 free_irq(qdev->msi_x_entry[i].vector,
3090 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00003091 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003092 "freeing msix interrupt %d.\n", i);
3093 } else {
3094 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00003095 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003096 "freeing msi interrupt %d.\n", i);
3097 }
3098 }
3099 }
3100 ql_disable_msix(qdev);
3101}
3102
3103static int ql_request_irq(struct ql_adapter *qdev)
3104{
3105 int i;
3106 int status = 0;
3107 struct pci_dev *pdev = qdev->pdev;
3108 struct intr_context *intr_context = &qdev->intr_context[0];
3109
3110 ql_resolve_queues_to_irqs(qdev);
3111
3112 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3113 atomic_set(&intr_context->irq_cnt, 0);
3114 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3115 status = request_irq(qdev->msi_x_entry[i].vector,
3116 intr_context->handler,
3117 0,
3118 intr_context->name,
3119 &qdev->rx_ring[i]);
3120 if (status) {
3121 QPRINTK(qdev, IFUP, ERR,
3122 "Failed request for MSIX interrupt %d.\n",
3123 i);
3124 goto err_irq;
3125 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003126 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003127 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3128 i,
3129 qdev->rx_ring[i].type ==
3130 DEFAULT_Q ? "DEFAULT_Q" : "",
3131 qdev->rx_ring[i].type ==
3132 TX_Q ? "TX_Q" : "",
3133 qdev->rx_ring[i].type ==
3134 RX_Q ? "RX_Q" : "", intr_context->name);
3135 }
3136 } else {
3137 QPRINTK(qdev, IFUP, DEBUG,
3138 "trying msi or legacy interrupts.\n");
3139 QPRINTK(qdev, IFUP, DEBUG,
3140 "%s: irq = %d.\n", __func__, pdev->irq);
3141 QPRINTK(qdev, IFUP, DEBUG,
3142 "%s: context->name = %s.\n", __func__,
3143 intr_context->name);
3144 QPRINTK(qdev, IFUP, DEBUG,
3145 "%s: dev_id = 0x%p.\n", __func__,
3146 &qdev->rx_ring[0]);
3147 status =
3148 request_irq(pdev->irq, qlge_isr,
3149 test_bit(QL_MSI_ENABLED,
3150 &qdev->
3151 flags) ? 0 : IRQF_SHARED,
3152 intr_context->name, &qdev->rx_ring[0]);
3153 if (status)
3154 goto err_irq;
3155
	3156			QPRINTK(qdev, IFUP, DEBUG,
3157 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3158 i,
3159 qdev->rx_ring[0].type ==
3160 DEFAULT_Q ? "DEFAULT_Q" : "",
3161 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3162 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3163 intr_context->name);
3164 }
3165 intr_context->hooked = 1;
3166 }
3167 return status;
3168err_irq:
	3169	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3170 ql_free_irq(qdev);
3171 return status;
3172}
3173
3174static int ql_start_rss(struct ql_adapter *qdev)
3175{
Ron Mercer541ae282009-10-08 09:54:37 +00003176 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3177 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3178 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3179 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3180 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3181 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003182 struct ricb *ricb = &qdev->ricb;
3183 int status = 0;
3184 int i;
3185 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3186
Ron Mercere3324712009-07-02 06:06:13 +00003187 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188
Ron Mercerb2014ff2009-08-27 11:02:09 +00003189 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003190 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003191 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3192 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003193
3194 /*
3195 * Fill out the Indirection Table.
3196 */
Ron Mercer541ae282009-10-08 09:54:37 +00003197 for (i = 0; i < 1024; i++)
3198 hash_id[i] = (i & (qdev->rss_ring_count - 1));
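	/* Note: this masking assumes rss_ring_count is a power of two.
	 * With four RSS rings, for example, the 1024 entries cycle
	 * 0,1,2,3,0,1,2,3,... so the 10-bit hash (ricb->mask = 0x3ff)
	 * spreads inbound flows evenly across the completion queues.
	 */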
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003199
Ron Mercer541ae282009-10-08 09:54:37 +00003200 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3201 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003202
Ron Mercer49740972009-02-26 10:08:36 +00003203 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003204
Ron Mercere3324712009-07-02 06:06:13 +00003205 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003206 if (status) {
3207 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3208 return status;
3209 }
Ron Mercer49740972009-02-26 10:08:36 +00003210 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003211 return status;
3212}
3213
Ron Mercera5f59dc2009-07-02 06:06:07 +00003214static int ql_clear_routing_entries(struct ql_adapter *qdev)
3215{
3216 int i, status = 0;
3217
3218 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3219 if (status)
3220 return status;
3221 /* Clear all the entries in the routing table. */
3222 for (i = 0; i < 16; i++) {
3223 status = ql_set_routing_reg(qdev, i, 0, 0);
3224 if (status) {
3225 QPRINTK(qdev, IFUP, ERR,
3226 "Failed to init routing register for CAM "
3227 "packets.\n");
3228 break;
3229 }
3230 }
3231 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3232 return status;
3233}
3234
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003235/* Initialize the frame-to-queue routing. */
3236static int ql_route_initialize(struct ql_adapter *qdev)
3237{
3238 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003239
3240 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003241 status = ql_clear_routing_entries(qdev);
3242 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003243 return status;
3244
3245 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3246 if (status)
3247 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003248
3249 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3250 if (status) {
3251 QPRINTK(qdev, IFUP, ERR,
3252 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003253 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003254 }
3255 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3256 if (status) {
3257 QPRINTK(qdev, IFUP, ERR,
3258 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003259 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003260 }
3261 /* If we have more than one inbound queue, then turn on RSS in the
3262 * routing block.
3263 */
3264 if (qdev->rss_ring_count > 1) {
3265 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3266 RT_IDX_RSS_MATCH, 1);
3267 if (status) {
3268 QPRINTK(qdev, IFUP, ERR,
3269 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003270 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003271 }
3272 }
3273
3274 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3275 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003276 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003277 QPRINTK(qdev, IFUP, ERR,
3278 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003279exit:
3280 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003281 return status;
3282}
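/* Editorial note: each ql_set_routing_reg() call above binds one class of
 * inbound frame (errored frames, broadcasts, RSS matches, CAM hits) to a
 * routing slot; the RT_IDX_* slot and type values come from qlge.h.
 * RT_IDX_RSS_MATCH is only enabled when rss_ring_count > 1, which keeps
 * all traffic on the single inbound queue in the non-RSS case.
 */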
3283
Ron Mercer2ee1e272009-03-03 12:10:33 +00003284int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003285{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003286 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003287
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003288	/* Check if the link is up and use that to
3289	 * determine whether we are setting or clearing
3290 * the MAC address in the CAM.
3291 */
3292 set = ql_read32(qdev, STS);
3293 set &= qdev->port_link_up;
3294 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003295 if (status) {
3296 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3297 return status;
3298 }
3299
3300 status = ql_route_initialize(qdev);
3301 if (status)
3302 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3303
3304 return status;
3305}
3306
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003307static int ql_adapter_initialize(struct ql_adapter *qdev)
3308{
3309 u32 value, mask;
3310 int i;
3311 int status = 0;
3312
3313 /*
3314 * Set up the System register to halt on errors.
3315 */
3316 value = SYS_EFE | SYS_FAE;
3317 mask = value << 16;
3318 ql_write32(qdev, SYS, mask | value);
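	/* Note: these control registers take a write-enable mask in the
	 * upper 16 bits, so "mask = value << 16" updates exactly the
	 * SYS_EFE and SYS_FAE bits and leaves the rest of the register
	 * untouched. The same mask-then-value idiom recurs below for
	 * NIC_RCV_CFG, FSC and MGMT_RCV_CFG.
	 */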
3319
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003320 /* Set the default queue, and VLAN behavior. */
3321 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3322 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003323 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3324
3325 /* Set the MPI interrupt to enabled. */
3326 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3327
3328 /* Enable the function, set pagesize, enable error checking. */
3329 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3330 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3331
3332 /* Set/clear header splitting. */
3333 mask = FSC_VM_PAGESIZE_MASK |
3334 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3335 ql_write32(qdev, FSC, mask | value);
3336
3337 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
Ron Mercer52e55f32009-10-10 09:35:07 +00003338 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003339
Ron Mercera3b71932009-10-08 09:54:38 +00003340	/* Set RX packet routing to use the port/pci function on which the
3341	 * packet arrived, in addition to the usual frame routing.
3342	 * This is helpful with bonding, where both interfaces can have
3343 * the same MAC address.
3344 */
3345 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003346 /* Reroute all packets to our Interface.
3347 * They may have been routed to MPI firmware
3348 * due to WOL.
3349 */
3350 value = ql_read32(qdev, MGMT_RCV_CFG);
3351 value &= ~MGMT_RCV_CFG_RM;
3352 mask = 0xffff0000;
3353
3354 /* Sticky reg needs clearing due to WOL. */
3355 ql_write32(qdev, MGMT_RCV_CFG, mask);
3356 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3357
3358	/* Default WOL is enabled on Mezz cards */
3359 if (qdev->pdev->subsystem_device == 0x0068 ||
3360 qdev->pdev->subsystem_device == 0x0180)
3361 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003362
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003363 /* Start up the rx queues. */
3364 for (i = 0; i < qdev->rx_ring_count; i++) {
3365 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3366 if (status) {
3367 QPRINTK(qdev, IFUP, ERR,
3368 "Failed to start rx ring[%d].\n", i);
3369 return status;
3370 }
3371 }
3372
3373 /* If there is more than one inbound completion queue
3374 * then download a RICB to configure RSS.
3375 */
3376 if (qdev->rss_ring_count > 1) {
3377 status = ql_start_rss(qdev);
3378 if (status) {
3379 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3380 return status;
3381 }
3382 }
3383
3384 /* Start up the tx queues. */
3385 for (i = 0; i < qdev->tx_ring_count; i++) {
3386 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3387 if (status) {
3388 QPRINTK(qdev, IFUP, ERR,
3389 "Failed to start tx ring[%d].\n", i);
3390 return status;
3391 }
3392 }
3393
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003394 /* Initialize the port and set the max framesize. */
3395 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003396 if (status)
3397 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003398
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003399 /* Set up the MAC address and frame routing filter. */
3400 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003401 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003402 QPRINTK(qdev, IFUP, ERR,
3403 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003404 return status;
3405 }
3406
3407 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003408 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003409 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003410 i);
3411 napi_enable(&qdev->rx_ring[i].napi);
3412 }
3413
3414 return status;
3415}
3416
3417/* Issue soft reset to chip. */
3418static int ql_adapter_reset(struct ql_adapter *qdev)
3419{
3420 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003421 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003422 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003423
Ron Mercera5f59dc2009-07-02 06:06:07 +00003424 /* Clear all the entries in the routing table. */
3425 status = ql_clear_routing_entries(qdev);
3426 if (status) {
3427 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3428 return status;
3429 }
3430
3431 end_jiffies = jiffies +
3432 max((unsigned long)1, usecs_to_jiffies(30));
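	/* Note: the RST_FO poll below follows the usual jiffies idiom:
	 * compute a deadline, spin with cpu_relax() while
	 * time_before(jiffies, end_jiffies), then re-test the bit after
	 * the loop. usecs_to_jiffies() rounds up, and the max() pins the
	 * window to at least one jiffy in any case.
	 */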
Ron Mercer84087f42009-10-08 09:54:41 +00003433
3434 /* Stop management traffic. */
3435 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3436
3437 /* Wait for the NIC and MGMNT FIFOs to empty. */
3438 ql_wait_fifo_empty(qdev);
3439
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003440 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003441
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003442 do {
3443 value = ql_read32(qdev, RST_FO);
3444 if ((value & RST_FO_FR) == 0)
3445 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003446 cpu_relax();
3447 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003448
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003449 if (value & RST_FO_FR) {
3450 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003451			"Timed out resetting the chip (ETIMEDOUT)!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003452 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003453 }
3454
Ron Mercer84087f42009-10-08 09:54:41 +00003455 /* Resume management traffic. */
3456 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003457 return status;
3458}
3459
3460static void ql_display_dev_info(struct net_device *ndev)
3461{
3462 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3463
3464 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003465 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003466 "XG Roll = %d, XG Rev = %d.\n",
3467 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003468 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003469 qdev->chip_rev_id & 0x0000000f,
3470 qdev->chip_rev_id >> 4 & 0x0000000f,
3471 qdev->chip_rev_id >> 8 & 0x0000000f,
3472 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003473 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003474}
3475
Ron Mercerbc083ce2009-10-21 11:07:40 +00003476int ql_wol(struct ql_adapter *qdev)
3477{
3478 int status = 0;
3479 u32 wol = MB_WOL_DISABLE;
3480
3481 /* The CAM is still intact after a reset, but if we
3482 * are doing WOL, then we may need to program the
3483 * routing regs. We would also need to issue the mailbox
3484 * commands to instruct the MPI what to do per the ethtool
3485 * settings.
3486 */
3487
3488 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3489 WAKE_MCAST | WAKE_BCAST)) {
3490 QPRINTK(qdev, IFDOWN, ERR,
3491			"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3492 qdev->wol);
3493 return -EINVAL;
3494 }
3495
3496 if (qdev->wol & WAKE_MAGIC) {
3497 status = ql_mb_wol_set_magic(qdev, 1);
3498 if (status) {
3499 QPRINTK(qdev, IFDOWN, ERR,
3500 "Failed to set magic packet on %s.\n",
3501 qdev->ndev->name);
3502 return status;
3503 } else
3504 QPRINTK(qdev, DRV, INFO,
3505 "Enabled magic packet successfully on %s.\n",
3506 qdev->ndev->name);
3507
3508 wol |= MB_WOL_MAGIC_PKT;
3509 }
3510
3511 if (qdev->wol) {
3512 /* Reroute all packets to Management Interface */
3513 ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
3514 (MGMT_RCV_CFG_RM << 16)));
3515 wol |= MB_WOL_MODE_ON;
3516 status = ql_mb_wol_mode(qdev, wol);
3517 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3518			(status == 0) ? "Successfully set" : "Failed", wol,
3519 qdev->ndev->name);
3520 }
3521
3522 return status;
3523}
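/* Editorial note: qdev->wol is normally set from userspace through
 * ethtool, e.g. "ethtool -s eth0 wol g" requests WAKE_MAGIC, the only
 * mode the checks above accept (besides the Mezz-card default applied
 * in ql_adapter_initialize()). The interface name here is illustrative.
 */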
3524
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003525static int ql_adapter_down(struct ql_adapter *qdev)
3526{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003527 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003528
Ron Mercer6a473302009-07-02 06:06:12 +00003529 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003530
Ron Mercer6497b602009-02-12 16:37:13 -08003531 /* Don't kill the reset worker thread if we
3532 * are in the process of recovery.
3533 */
3534 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3535 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003536 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3537 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003538 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003539 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003540
Ron Mercer39aa8162009-08-27 11:02:11 +00003541 for (i = 0; i < qdev->rss_ring_count; i++)
3542 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003543
3544 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3545
3546 ql_disable_interrupts(qdev);
3547
3548 ql_tx_ring_clean(qdev);
3549
Ron Mercer6b318cb2009-03-09 10:59:26 +00003550	/* Call netif_napi_del() from a common point.
3551 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003552 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003553 netif_napi_del(&qdev->rx_ring[i].napi);
3554
Ron Mercer4545a3f2009-02-23 10:42:17 +00003555 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003556
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003557 status = ql_adapter_reset(qdev);
3558 if (status)
3559 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3560 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003561 return status;
3562}
3563
3564static int ql_adapter_up(struct ql_adapter *qdev)
3565{
3566 int err = 0;
3567
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003568 err = ql_adapter_initialize(qdev);
3569 if (err) {
3570 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003571 goto err_init;
3572 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003573 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003574 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003575 /* If the port is initialized and the
3576	 * link is up then turn on the carrier.
3577 */
3578 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3579 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003580 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003581 ql_enable_interrupts(qdev);
3582 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003583 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003584
3585 return 0;
3586err_init:
3587 ql_adapter_reset(qdev);
3588 return err;
3589}
3590
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003591static void ql_release_adapter_resources(struct ql_adapter *qdev)
3592{
3593 ql_free_mem_resources(qdev);
3594 ql_free_irq(qdev);
3595}
3596
3597static int ql_get_adapter_resources(struct ql_adapter *qdev)
3598{
3599 int status = 0;
3600
3601 if (ql_alloc_mem_resources(qdev)) {
3602 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3603 return -ENOMEM;
3604 }
3605 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003606 return status;
3607}
3608
3609static int qlge_close(struct net_device *ndev)
3610{
3611 struct ql_adapter *qdev = netdev_priv(ndev);
3612
3613 /*
3614 * Wait for device to recover from a reset.
3615 * (Rarely happens, but possible.)
3616 */
3617 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3618 msleep(1);
3619 ql_adapter_down(qdev);
3620 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003621 return 0;
3622}
3623
3624static int ql_configure_rings(struct ql_adapter *qdev)
3625{
3626 int i;
3627 struct rx_ring *rx_ring;
3628 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003629 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003630 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3631 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3632
3633 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003634
Ron Mercera4ab6132009-08-27 11:02:10 +00003635 /* In a perfect world we have one RSS ring for each CPU
3636	 * and each has its own vector. To do that we ask for
3637 * cpu_cnt vectors. ql_enable_msix() will adjust the
3638 * vector count to what we actually get. We then
3639 * allocate an RSS ring for each.
3640 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003641 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003642 qdev->intr_count = cpu_cnt;
3643 ql_enable_msix(qdev);
3644 /* Adjust the RSS ring count to the actual vector count. */
3645 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003646 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003647 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003649 for (i = 0; i < qdev->tx_ring_count; i++) {
3650 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003651 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003652 tx_ring->qdev = qdev;
3653 tx_ring->wq_id = i;
3654 tx_ring->wq_len = qdev->tx_ring_size;
3655 tx_ring->wq_size =
3656 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3657
3658 /*
3659		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003660 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003661 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003662 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003663 }
3664
3665 for (i = 0; i < qdev->rx_ring_count; i++) {
3666 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003667 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003668 rx_ring->qdev = qdev;
3669 rx_ring->cq_id = i;
3670 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003671 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003672 /*
3673 * Inbound (RSS) queues.
3674 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003675 rx_ring->cq_len = qdev->rx_ring_size;
3676 rx_ring->cq_size =
3677 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3678 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3679 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003680 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00003681 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3682 QPRINTK(qdev, IFUP, DEBUG,
3683 "lbq_buf_size %d, order = %d\n",
3684 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003685 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3686 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003687 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00003688 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003689 rx_ring->type = RX_Q;
3690 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003691 /*
3692 * Outbound queue handles outbound completions only.
3693 */
3694 /* outbound cq is same size as tx_ring it services. */
3695 rx_ring->cq_len = qdev->tx_ring_size;
3696 rx_ring->cq_size =
3697 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3698 rx_ring->lbq_len = 0;
3699 rx_ring->lbq_size = 0;
3700 rx_ring->lbq_buf_size = 0;
3701 rx_ring->sbq_len = 0;
3702 rx_ring->sbq_size = 0;
3703 rx_ring->sbq_buf_size = 0;
3704 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003705 }
3706 }
3707 return 0;
3708}
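/* Editorial note: a worked example of the layout built above, assuming
 * four online CPUs and four granted MSI-X vectors: rss_ring_count = 4,
 * tx_ring_count = 4, rx_ring_count = 8. rx_ring[0..3] are inbound RSS
 * queues (type RX_Q, cq_id 0..3), rx_ring[4..7] are outbound completion
 * queues (type TX_Q, cq_id 4..7), and tx_ring[i] completes on
 * cq_id = rss_ring_count + i.
 */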
3709
3710static int qlge_open(struct net_device *ndev)
3711{
3712 int err = 0;
3713 struct ql_adapter *qdev = netdev_priv(ndev);
3714
3715 err = ql_configure_rings(qdev);
3716 if (err)
3717 return err;
3718
3719 err = ql_get_adapter_resources(qdev);
3720 if (err)
3721 goto error_up;
3722
3723 err = ql_adapter_up(qdev);
3724 if (err)
3725 goto error_up;
3726
3727 return err;
3728
3729error_up:
3730 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003731 return err;
3732}
3733
Ron Mercer7c734352009-10-19 03:32:19 +00003734static int ql_change_rx_buffers(struct ql_adapter *qdev)
3735{
3736 struct rx_ring *rx_ring;
3737 int i, status;
3738 u32 lbq_buf_len;
3739
3740	/* Wait for an outstanding reset to complete. */
3741 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3742 int i = 3;
3743 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3744 QPRINTK(qdev, IFUP, ERR,
3745 "Waiting for adapter UP...\n");
3746 ssleep(1);
3747 }
3748
3749		if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3750 QPRINTK(qdev, IFUP, ERR,
3751 "Timed out waiting for adapter UP\n");
3752 return -ETIMEDOUT;
3753 }
3754 }
3755
3756 status = ql_adapter_down(qdev);
3757 if (status)
3758 goto error;
3759
3760 /* Get the new rx buffer size. */
3761 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3762 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3763 qdev->lbq_buf_order = get_order(lbq_buf_len);
3764
3765 for (i = 0; i < qdev->rss_ring_count; i++) {
3766 rx_ring = &qdev->rx_ring[i];
3767 /* Set the new size. */
3768 rx_ring->lbq_buf_size = lbq_buf_len;
3769 }
3770
3771 status = ql_adapter_up(qdev);
3772 if (status)
3773 goto error;
3774
3775 return status;
3776error:
3777 QPRINTK(qdev, IFUP, ALERT,
3778 "Driver up/down cycle failed, closing device.\n");
3779 set_bit(QL_ADAPTER_UP, &qdev->flags);
3780 dev_close(qdev->ndev);
3781 return status;
3782}
3783
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003784static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3785{
3786 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00003787 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003788
3789 if (ndev->mtu == 1500 && new_mtu == 9000) {
3790 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3791 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3792 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3793 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3794 (ndev->mtu == 9000 && new_mtu == 9000)) {
3795 return 0;
3796 } else
3797 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00003798
3799 queue_delayed_work(qdev->workqueue,
3800 &qdev->mpi_port_cfg_work, 3*HZ);
3801
3802 if (!netif_running(qdev->ndev)) {
3803 ndev->mtu = new_mtu;
3804 return 0;
3805 }
3806
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003807 ndev->mtu = new_mtu;
Ron Mercer7c734352009-10-19 03:32:19 +00003808 status = ql_change_rx_buffers(qdev);
3809 if (status) {
3810 QPRINTK(qdev, IFUP, ERR,
3811 "Changing MTU failed.\n");
3812 }
3813
3814 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003815}
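/* Editorial note: only the 1500 <-> 9000 transition is accepted above,
 * so e.g. "ip link set eth0 mtu 9000" takes the jumbo path while any
 * other value returns -EINVAL. On a running interface the change costs
 * a full adapter down/up cycle via ql_change_rx_buffers().
 */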
3816
3817static struct net_device_stats *qlge_get_stats(struct net_device
3818 *ndev)
3819{
Ajit Khapardebcc90f52009-10-07 02:46:09 +00003820 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003821}
3822
3823static void qlge_set_multicast_list(struct net_device *ndev)
3824{
3825 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3826 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003827 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003828
Ron Mercercc288f52009-02-23 10:42:14 +00003829 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3830 if (status)
3831 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003832 /*
3833 * Set or clear promiscuous mode if a
3834 * transition is taking place.
3835 */
3836 if (ndev->flags & IFF_PROMISC) {
3837 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3838 if (ql_set_routing_reg
3839 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3840 QPRINTK(qdev, HW, ERR,
3841					"Failed to set promiscuous mode.\n");
3842 } else {
3843 set_bit(QL_PROMISCUOUS, &qdev->flags);
3844 }
3845 }
3846 } else {
3847 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3848 if (ql_set_routing_reg
3849 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3850 QPRINTK(qdev, HW, ERR,
3851					"Failed to clear promiscuous mode.\n");
3852 } else {
3853 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3854 }
3855 }
3856 }
3857
3858 /*
3859 * Set or clear all multicast mode if a
3860 * transition is taking place.
3861 */
3862 if ((ndev->flags & IFF_ALLMULTI) ||
3863 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3864 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3865 if (ql_set_routing_reg
3866 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3867 QPRINTK(qdev, HW, ERR,
3868 "Failed to set all-multi mode.\n");
3869 } else {
3870 set_bit(QL_ALLMULTI, &qdev->flags);
3871 }
3872 }
3873 } else {
3874 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3875 if (ql_set_routing_reg
3876 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3877 QPRINTK(qdev, HW, ERR,
3878 "Failed to clear all-multi mode.\n");
3879 } else {
3880 clear_bit(QL_ALLMULTI, &qdev->flags);
3881 }
3882 }
3883 }
3884
3885 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003886 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3887 if (status)
3888 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003889 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3890 i++, mc_ptr = mc_ptr->next)
3891 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3892 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3893 QPRINTK(qdev, HW, ERR,
3894					"Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003895 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003896 goto exit;
3897 }
Ron Mercercc288f52009-02-23 10:42:14 +00003898 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003899 if (ql_set_routing_reg
3900 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3901 QPRINTK(qdev, HW, ERR,
3902 "Failed to set multicast match mode.\n");
3903 } else {
3904 set_bit(QL_ALLMULTI, &qdev->flags);
3905 }
3906 }
3907exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00003908 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003909}
3910
3911static int qlge_set_mac_address(struct net_device *ndev, void *p)
3912{
3913 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3914 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003915 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003916
3917 if (netif_running(ndev))
3918 return -EBUSY;
3919
3920 if (!is_valid_ether_addr(addr->sa_data))
3921 return -EADDRNOTAVAIL;
3922 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3923
Ron Mercercc288f52009-02-23 10:42:14 +00003924 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3925 if (status)
3926 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00003927 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3928 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00003929 if (status)
3930 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3931 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3932 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003933}
3934
3935static void qlge_tx_timeout(struct net_device *ndev)
3936{
3937 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08003938 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003939}
3940
3941static void ql_asic_reset_work(struct work_struct *work)
3942{
3943 struct ql_adapter *qdev =
3944 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00003945 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003946 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00003947 status = ql_adapter_down(qdev);
3948 if (status)
3949 goto error;
3950
3951 status = ql_adapter_up(qdev);
3952 if (status)
3953 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00003954
3955 /* Restore rx mode. */
3956 clear_bit(QL_ALLMULTI, &qdev->flags);
3957 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3958 qlge_set_multicast_list(qdev->ndev);
3959
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003960 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00003961 return;
3962error:
3963 QPRINTK(qdev, IFUP, ALERT,
3964 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003965
Ron Mercerdb988122009-03-09 10:59:17 +00003966 set_bit(QL_ADAPTER_UP, &qdev->flags);
3967 dev_close(qdev->ndev);
3968 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003969}
3970
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003971static struct nic_operations qla8012_nic_ops = {
3972 .get_flash = ql_get_8012_flash_params,
3973 .port_initialize = ql_8012_port_initialize,
3974};
3975
Ron Mercercdca8d02009-03-02 08:07:31 +00003976static struct nic_operations qla8000_nic_ops = {
3977 .get_flash = ql_get_8000_flash_params,
3978 .port_initialize = ql_8000_port_initialize,
3979};
3980
Ron Mercere4552f52009-06-09 05:39:32 +00003981/* Find the pcie function number for the other NIC
3982 * on this chip. Since both NIC functions share a
3983 * common firmware we have the lowest enabled function
3984 * do any common work. Examples would be resetting
3985 * after a fatal firmware error, or doing a firmware
3986 * coredump.
3987 */
3988static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003989{
Ron Mercere4552f52009-06-09 05:39:32 +00003990 int status = 0;
3991 u32 temp;
3992 u32 nic_func1, nic_func2;
3993
3994 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3995 &temp);
3996 if (status)
3997 return status;
3998
3999 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4000 MPI_TEST_NIC_FUNC_MASK);
4001 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4002 MPI_TEST_NIC_FUNC_MASK);
4003
4004 if (qdev->func == nic_func1)
4005 qdev->alt_func = nic_func2;
4006 else if (qdev->func == nic_func2)
4007 qdev->alt_func = nic_func1;
4008 else
4009 status = -EIO;
4010
4011 return status;
4012}
4013
4014static int ql_get_board_info(struct ql_adapter *qdev)
4015{
4016 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004017 qdev->func =
4018 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004019 if (qdev->func > 3)
4020 return -EIO;
4021
4022 status = ql_get_alt_pcie_func(qdev);
4023 if (status)
4024 return status;
4025
4026 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
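	/* Note: the lower-numbered of the two NIC functions is port 0.
	 * For example, func = 2 with alt_func = 1 yields port 1, which
	 * selects the XGMAC1 semaphore and the function-2 mailbox
	 * addresses below.
	 */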
4027 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004028 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4029 qdev->port_link_up = STS_PL1;
4030 qdev->port_init = STS_PI1;
4031 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4032 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4033 } else {
4034 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4035 qdev->port_link_up = STS_PL0;
4036 qdev->port_init = STS_PI0;
4037 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4038 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4039 }
4040 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004041 qdev->device_id = qdev->pdev->device;
4042 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4043 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004044 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4045 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004046 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004047}
4048
4049static void ql_release_all(struct pci_dev *pdev)
4050{
4051 struct net_device *ndev = pci_get_drvdata(pdev);
4052 struct ql_adapter *qdev = netdev_priv(ndev);
4053
4054 if (qdev->workqueue) {
4055 destroy_workqueue(qdev->workqueue);
4056 qdev->workqueue = NULL;
4057 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004058
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004059 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004060 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004061 if (qdev->doorbell_area)
4062 iounmap(qdev->doorbell_area);
4063 pci_release_regions(pdev);
4064 pci_set_drvdata(pdev, NULL);
4065}
4066
4067static int __devinit ql_init_device(struct pci_dev *pdev,
4068 struct net_device *ndev, int cards_found)
4069{
4070 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004071 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004072
Ron Mercere3324712009-07-02 06:06:13 +00004073 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004074 err = pci_enable_device(pdev);
4075 if (err) {
4076 dev_err(&pdev->dev, "PCI device enable failed.\n");
4077 return err;
4078 }
4079
Ron Mercerebd6e772009-09-29 08:39:25 +00004080 qdev->ndev = ndev;
4081 qdev->pdev = pdev;
4082 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004083
Ron Mercerbc9167f2009-10-10 09:35:04 +00004084 /* Set PCIe read request size */
4085 err = pcie_set_readrq(pdev, 4096);
4086 if (err) {
4087 dev_err(&pdev->dev, "Set readrq failed.\n");
4088 goto err_out;
4089 }
4090
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004091 err = pci_request_regions(pdev, DRV_NAME);
4092 if (err) {
4093 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004094 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004095 }
4096
4097 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004098 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004099 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004100 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004101 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004102 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004103 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004104 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004105 }
4106
4107 if (err) {
4108 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4109 goto err_out;
4110 }
4111
Ron Mercer6d190c62009-10-28 08:39:20 +00004112 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004113 qdev->reg_base =
4114 ioremap_nocache(pci_resource_start(pdev, 1),
4115 pci_resource_len(pdev, 1));
4116 if (!qdev->reg_base) {
4117 dev_err(&pdev->dev, "Register mapping failed.\n");
4118 err = -ENOMEM;
4119 goto err_out;
4120 }
4121
4122 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4123 qdev->doorbell_area =
4124 ioremap_nocache(pci_resource_start(pdev, 3),
4125 pci_resource_len(pdev, 3));
4126 if (!qdev->doorbell_area) {
4127 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4128 err = -ENOMEM;
4129 goto err_out;
4130 }
4131
Ron Mercere4552f52009-06-09 05:39:32 +00004132 err = ql_get_board_info(qdev);
4133 if (err) {
4134 dev_err(&pdev->dev, "Register access failed.\n");
4135 err = -EIO;
4136 goto err_out;
4137 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004138 qdev->msg_enable = netif_msg_init(debug, default_msg);
4139 spin_lock_init(&qdev->hw_lock);
4140 spin_lock_init(&qdev->stats_lock);
4141
4142 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004143 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004144 if (err) {
4145 dev_err(&pdev->dev, "Invalid FLASH.\n");
4146 goto err_out;
4147 }
4148
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004149 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4150
4151 /* Set up the default ring sizes. */
4152 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4153 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4154
4155 /* Set up the coalescing parameters. */
4156 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4157 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4158 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4159 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4160
4161 /*
4162 * Set up the operating parameters.
4163 */
4164 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004165 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4166 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4167 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4168 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004169 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004170 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004171 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004172
4173 if (!cards_found) {
4174 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4175 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4176 DRV_NAME, DRV_VERSION);
4177 }
4178 return 0;
4179err_out:
4180 ql_release_all(pdev);
4181 pci_disable_device(pdev);
4182 return err;
4183}
4184
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004185static const struct net_device_ops qlge_netdev_ops = {
4186 .ndo_open = qlge_open,
4187 .ndo_stop = qlge_close,
4188 .ndo_start_xmit = qlge_send,
4189 .ndo_change_mtu = qlge_change_mtu,
4190 .ndo_get_stats = qlge_get_stats,
4191 .ndo_set_multicast_list = qlge_set_multicast_list,
4192 .ndo_set_mac_address = qlge_set_mac_address,
4193 .ndo_validate_addr = eth_validate_addr,
4194 .ndo_tx_timeout = qlge_tx_timeout,
Ron Mercer01e6b952009-10-30 12:13:34 +00004195 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4196 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4197 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004198};
4199
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004200static int __devinit qlge_probe(struct pci_dev *pdev,
4201 const struct pci_device_id *pci_entry)
4202{
4203 struct net_device *ndev = NULL;
4204 struct ql_adapter *qdev = NULL;
4205 static int cards_found = 0;
4206 int err = 0;
4207
Ron Mercer1e213302009-03-09 10:59:21 +00004208 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4209 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004210 if (!ndev)
4211 return -ENOMEM;
4212
4213 err = ql_init_device(pdev, ndev, cards_found);
4214 if (err < 0) {
4215 free_netdev(ndev);
4216 return err;
4217 }
4218
4219 qdev = netdev_priv(ndev);
4220 SET_NETDEV_DEV(ndev, &pdev->dev);
4221 ndev->features = (0
4222 | NETIF_F_IP_CSUM
4223 | NETIF_F_SG
4224 | NETIF_F_TSO
4225 | NETIF_F_TSO6
4226 | NETIF_F_TSO_ECN
4227 | NETIF_F_HW_VLAN_TX
4228 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004229 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004230
4231 if (test_bit(QL_DMA64, &qdev->flags))
4232 ndev->features |= NETIF_F_HIGHDMA;
4233
4234 /*
4235 * Set up net_device structure.
4236 */
4237 ndev->tx_queue_len = qdev->tx_ring_size;
4238 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004239
4240 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004241 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004242 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004243
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004244 err = register_netdev(ndev);
4245 if (err) {
4246 dev_err(&pdev->dev, "net device registration failed.\n");
4247 ql_release_all(pdev);
4248 pci_disable_device(pdev);
4249 return err;
4250 }
Ron Mercer6a473302009-07-02 06:06:12 +00004251 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004252 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004253 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004254 cards_found++;
4255 return 0;
4256}
4257
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004258netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4259{
4260 return qlge_send(skb, ndev);
4261}
4262
4263int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4264{
4265 return ql_clean_inbound_rx_ring(rx_ring, budget);
4266}
4267
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004268static void __devexit qlge_remove(struct pci_dev *pdev)
4269{
4270 struct net_device *ndev = pci_get_drvdata(pdev);
4271 unregister_netdev(ndev);
4272 ql_release_all(pdev);
4273 pci_disable_device(pdev);
4274 free_netdev(ndev);
4275}
4276
Ron Mercer6d190c62009-10-28 08:39:20 +00004277/* Clean up resources without touching hardware. */
4278static void ql_eeh_close(struct net_device *ndev)
4279{
4280 int i;
4281 struct ql_adapter *qdev = netdev_priv(ndev);
4282
4283 if (netif_carrier_ok(ndev)) {
4284 netif_carrier_off(ndev);
4285 netif_stop_queue(ndev);
4286 }
4287
4288 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4289 cancel_delayed_work_sync(&qdev->asic_reset_work);
4290 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4291 cancel_delayed_work_sync(&qdev->mpi_work);
4292 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4293 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4294
4295 for (i = 0; i < qdev->rss_ring_count; i++)
4296 netif_napi_del(&qdev->rx_ring[i].napi);
4297
4298 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4299 ql_tx_ring_clean(qdev);
4300 ql_free_rx_buffers(qdev);
4301 ql_release_adapter_resources(qdev);
4302}
4303
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004304/*
4305 * This callback is called by the PCI subsystem whenever
4306 * a PCI bus error is detected.
4307 */
4308static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4309 enum pci_channel_state state)
4310{
4311 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004312
Ron Mercer6d190c62009-10-28 08:39:20 +00004313 switch (state) {
4314 case pci_channel_io_normal:
4315 return PCI_ERS_RESULT_CAN_RECOVER;
4316 case pci_channel_io_frozen:
4317 netif_device_detach(ndev);
4318 if (netif_running(ndev))
4319 ql_eeh_close(ndev);
4320 pci_disable_device(pdev);
4321 return PCI_ERS_RESULT_NEED_RESET;
4322 case pci_channel_io_perm_failure:
4323 dev_err(&pdev->dev,
4324 "%s: pci_channel_io_perm_failure.\n", __func__);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004325 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004326 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004327
4328 /* Request a slot reset. */
4329 return PCI_ERS_RESULT_NEED_RESET;
4330}
4331
4332/*
4333 * This callback is called after the PCI bus has been reset.
4334 * Basically, this tries to restart the card from scratch.
4335 * This is a shortened version of the device probe/discovery code,
4336 * it resembles the first half of the probe routine.
4337 */
4338static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4339{
4340 struct net_device *ndev = pci_get_drvdata(pdev);
4341 struct ql_adapter *qdev = netdev_priv(ndev);
4342
Ron Mercer6d190c62009-10-28 08:39:20 +00004343 pdev->error_state = pci_channel_io_normal;
4344
4345 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004346 if (pci_enable_device(pdev)) {
4347 QPRINTK(qdev, IFUP, ERR,
4348 "Cannot re-enable PCI device after reset.\n");
4349 return PCI_ERS_RESULT_DISCONNECT;
4350 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004351 pci_set_master(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004352 return PCI_ERS_RESULT_RECOVERED;
4353}
4354
4355static void qlge_io_resume(struct pci_dev *pdev)
4356{
4357 struct net_device *ndev = pci_get_drvdata(pdev);
4358 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004359 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004360
Ron Mercer6d190c62009-10-28 08:39:20 +00004361 if (ql_adapter_reset(qdev))
4362 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004363 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004364 err = qlge_open(ndev);
4365 if (err) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004366 QPRINTK(qdev, IFUP, ERR,
4367 "Device initialization failed after reset.\n");
4368 return;
4369 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004370 } else {
4371 QPRINTK(qdev, IFUP, ERR,
4372 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004373 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004374 netif_device_attach(ndev);
4375}
4376
4377static struct pci_error_handlers qlge_err_handler = {
4378 .error_detected = qlge_io_error_detected,
4379 .slot_reset = qlge_io_slot_reset,
4380 .resume = qlge_io_resume,
4381};
4382
4383static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4384{
4385 struct net_device *ndev = pci_get_drvdata(pdev);
4386 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004387 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004388
4389 netif_device_detach(ndev);
4390
4391 if (netif_running(ndev)) {
4392 err = ql_adapter_down(qdev);
4393		if (err)
4394 return err;
4395 }
4396
Ron Mercerbc083ce2009-10-21 11:07:40 +00004397 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004398 err = pci_save_state(pdev);
4399 if (err)
4400 return err;
4401
4402 pci_disable_device(pdev);
4403
4404 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4405
4406 return 0;
4407}
4408
David S. Miller04da2cf2008-09-19 16:14:24 -07004409#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004410static int qlge_resume(struct pci_dev *pdev)
4411{
4412 struct net_device *ndev = pci_get_drvdata(pdev);
4413 struct ql_adapter *qdev = netdev_priv(ndev);
4414 int err;
4415
4416 pci_set_power_state(pdev, PCI_D0);
4417 pci_restore_state(pdev);
4418 err = pci_enable_device(pdev);
4419 if (err) {
4420 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4421 return err;
4422 }
4423 pci_set_master(pdev);
4424
4425 pci_enable_wake(pdev, PCI_D3hot, 0);
4426 pci_enable_wake(pdev, PCI_D3cold, 0);
4427
4428 if (netif_running(ndev)) {
4429 err = ql_adapter_up(qdev);
4430 if (err)
4431 return err;
4432 }
4433
4434 netif_device_attach(ndev);
4435
4436 return 0;
4437}
David S. Miller04da2cf2008-09-19 16:14:24 -07004438#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004439
4440static void qlge_shutdown(struct pci_dev *pdev)
4441{
4442 qlge_suspend(pdev, PMSG_SUSPEND);
4443}
4444
4445static struct pci_driver qlge_driver = {
4446 .name = DRV_NAME,
4447 .id_table = qlge_pci_tbl,
4448 .probe = qlge_probe,
4449 .remove = __devexit_p(qlge_remove),
4450#ifdef CONFIG_PM
4451 .suspend = qlge_suspend,
4452 .resume = qlge_resume,
4453#endif
4454 .shutdown = qlge_shutdown,
4455 .err_handler = &qlge_err_handler
4456};
4457
4458static int __init qlge_init_module(void)
4459{
4460 return pci_register_driver(&qlge_driver);
4461}
4462
4463static void __exit qlge_exit(void)
4464{
4465 pci_unregister_driver(&qlge_driver);
4466}
4467
4468module_init(qlge_init_module);
4469module_exit(qlge_exit);