Ron Mercerc4e84bd2008-09-18 11:56:28 -04001/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040037#include <linux/if_vlan.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040038#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
Kamalesh Babulalb7c6bfb2008-10-13 18:41:01 -070041#include <net/ip6_checksum.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040042
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
Ron Mercer49740972009-02-26 10:08:36 +000060/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
Ron Mercerc4e84bd2008-09-18 11:56:28 -040062/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
Ron Mercera5a62a12009-11-11 12:54:05 +000072static int qlge_irq_type = MSIX_IRQ;
 73module_param(qlge_irq_type, int, 0);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
Ron Mercerc4e84bd2008-09-18 11:56:28 -040075
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +000077 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
Ron Mercercdca8d02009-03-02 08:07:31 +000078 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
Ron Mercerc4e84bd2008-09-18 11:56:28 -040079 /* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
 85/* This hardware semaphore provides exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
 119		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
120 return -EINVAL;
121 }
122
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
Ron Mercer0857e9d2009-01-09 11:31:52 +0000129 unsigned int wait_count = 30;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400130 do {
131 if (!ql_sem_trylock(qdev, sem_mask))
132 return 0;
Ron Mercer0857e9d2009-01-09 11:31:52 +0000133 udelay(100);
134 } while (--wait_count);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400135 return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
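/*
 * Usage sketch (illustrative, mirroring callers later in this file
 * such as ql_set_mac_addr() and the flash readers): a holder of a
 * shared resource brackets the access with the spinlock/unlock pair:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the shared resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */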
143
144/* This function waits for a specific bit to come ready
 145 * in a given register. It is used mostly by the initialization
 146 * process, but is also used from slow-path entry points such as
 147 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
148 */
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
172
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
Ron Mercer4322c5b2009-07-02 06:06:06 +0000216 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
217 if (status)
218 return status;
219
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400220 status = ql_wait_cfg(qdev, bit);
221 if (status) {
222 QPRINTK(qdev, IFUP, ERR,
223 "Timed out waiting for CFG to come ready.\n");
224 goto exit;
225 }
226
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400229
230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
Ron Mercer4322c5b2009-07-02 06:06:06 +0000239 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400240 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
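/*
 * Typical use (a sketch; the real call sites are in the ring bring-up
 * code later in this file, and the argument names here are
 * illustrative): a completion queue's in-memory control block is
 * pushed to the chip with something like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * where the bit selects the load-completion-queue operation and the
 * queue id tells the chip which ring the block describes.
 */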
243
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400251 switch (type) {
252 case MAC_ADDR_TYPE_MULTI_MAC:
253 case MAC_ADDR_TYPE_CAM_MAC:
254 {
255 status =
256 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800257 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400258 if (status)
259 goto exit;
260 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261 (index << MAC_ADDR_IDX_SHIFT) | /* index */
262 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263 status =
264 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800265 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400266 if (status)
267 goto exit;
268 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269 status =
270 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800271 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400272 if (status)
273 goto exit;
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277 status =
278 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800279 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400280 if (status)
281 goto exit;
282 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 if (type == MAC_ADDR_TYPE_CAM_MAC) {
284 status =
285 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
Ron Mercer939678f2009-01-04 17:08:29 -0800294 MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 }
299 break;
300 }
301 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default:
304 QPRINTK(qdev, IFUP, CRIT,
305 "Address type %d not yet supported.\n", type);
306 status = -EPERM;
307 }
308exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400309 return status;
310}
311
312/* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
314 */
315static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316 u16 index)
317{
318 u32 offset = 0;
319 int status = 0;
320
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400321 switch (type) {
322 case MAC_ADDR_TYPE_MULTI_MAC:
Ron Mercer76b26692009-10-08 09:54:40 +0000323 {
324 u32 upper = (addr[0] << 8) | addr[1];
325 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326 (addr[4] << 8) | (addr[5]);
327
328 status =
329 ql_wait_reg_rdy(qdev,
330 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331 if (status)
332 goto exit;
333 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334 (index << MAC_ADDR_IDX_SHIFT) |
335 type | MAC_ADDR_E);
336 ql_write32(qdev, MAC_ADDR_DATA, lower);
337 status =
338 ql_wait_reg_rdy(qdev,
339 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340 if (status)
341 goto exit;
342 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343 (index << MAC_ADDR_IDX_SHIFT) |
344 type | MAC_ADDR_E);
345
346 ql_write32(qdev, MAC_ADDR_DATA, upper);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 break;
353 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400354 case MAC_ADDR_TYPE_CAM_MAC:
355 {
356 u32 cam_output;
357 u32 upper = (addr[0] << 8) | addr[1];
358 u32 lower =
359 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360 (addr[5]);
361
Ron Mercer49740972009-02-26 10:08:36 +0000362 QPRINTK(qdev, IFUP, DEBUG,
Johannes Berg7c510e42008-10-27 17:47:26 -0700363 "Adding %s address %pM"
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400364 " at index %d in the CAM.\n",
365 ((type ==
366 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
Johannes Berg7c510e42008-10-27 17:47:26 -0700367 "UNICAST"), addr, index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400368
369 status =
370 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800371 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400372 if (status)
373 goto exit;
374 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
375 (index << MAC_ADDR_IDX_SHIFT) | /* index */
376 type); /* type */
377 ql_write32(qdev, MAC_ADDR_DATA, lower);
378 status =
379 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400381 if (status)
382 goto exit;
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 type); /* type */
386 ql_write32(qdev, MAC_ADDR_DATA, upper);
387 status =
388 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400390 if (status)
391 goto exit;
392 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 type); /* type */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
398 */
Ron Mercer76b26692009-10-08 09:54:40 +0000399 cam_output = (CAM_OUT_ROUTE_NIC |
400 (qdev->
401 func << CAM_OUT_FUNC_SHIFT) |
402 (0 << CAM_OUT_CQ_ID_SHIFT));
403 if (qdev->vlgrp)
404 cam_output |= CAM_OUT_RV;
405 /* route to NIC core */
406 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400407 break;
408 }
409 case MAC_ADDR_TYPE_VLAN:
410 {
411 u32 enable_bit = *((u32 *) &addr[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
416 */
417 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit ? "Adding" : "Removing"),
419 index, (enable_bit ? "to" : "from"));
420
421 status =
422 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800423 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400424 if (status)
425 goto exit;
426 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
427 (index << MAC_ADDR_IDX_SHIFT) | /* index */
428 type | /* type */
429 enable_bit); /* enable/disable */
430 break;
431 }
432 case MAC_ADDR_TYPE_MULTI_FLTR:
433 default:
434 QPRINTK(qdev, IFUP, CRIT,
435 "Address type %d not yet supported.\n", type);
436 status = -EPERM;
437 }
438exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400439 return status;
440}
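/*
 * Worked example of the CAM word layout built above: for the MAC
 * address 00:c0:dd:11:22:33 the code computes
 *
 *	upper = (0x00 << 8) | 0xc0 = 0x000000c0
 *	lower = (0xdd << 24) | (0x11 << 16) | (0x22 << 8) | 0x33
 *	      = 0xdd112233
 *
 * lower is written to the first CAM data word, upper to the second,
 * and for CAM_MAC entries a third word (cam_output) carries the
 * routing information.
 */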
441
Ron Mercer7fab3bf2009-07-02 06:06:11 +0000442/* Set or clear MAC address in hardware. We sometimes
443 * have to clear it to prevent wrong frame routing
444 * especially in a bonding environment.
445 */
446static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
447{
448 int status;
449 char zero_mac_addr[ETH_ALEN];
450 char *addr;
451
452 if (set) {
453 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0];
461 QPRINTK(qdev, IFUP, DEBUG,
462 "Clearing MAC address on %s\n",
463 qdev->ndev->name);
464 }
465 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466 if (status)
467 return status;
468 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
469 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471 if (status)
472 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
473 "address.\n");
474 return status;
475}
476
Ron Mercer6a473302009-07-02 06:06:12 +0000477void ql_link_on(struct ql_adapter *qdev)
478{
479 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
480 qdev->ndev->name);
481 netif_carrier_on(qdev->ndev);
482 ql_set_mac_addr(qdev, 1);
483}
484
485void ql_link_off(struct ql_adapter *qdev)
486{
487 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
488 qdev->ndev->name);
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
491}
492
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400493/* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497{
498 int status = 0;
499
Ron Mercer939678f2009-01-04 17:08:29 -0800500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
Ron Mercer939678f2009-01-04 17:08:29 -0800506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400511 return status;
512}
513
514/* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521{
Ron Mercer8587ea32009-02-23 10:42:15 +0000522 int status = -EINVAL; /* Return error if no mask match. */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400523 u32 value = 0;
524
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400525 QPRINTK(qdev, IFUP, DEBUG,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable ? "Adding" : "Removing"),
528 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
530 ((index ==
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
533 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
534 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
535 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
536 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
537 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
539 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
540 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
541 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
542 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
543 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
544 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
545 (enable ? "to" : "from"));
546
547 switch (mask) {
548 case RT_IDX_CAM_HIT:
549 {
550 value = RT_IDX_DST_CAM_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
553 break;
554 }
555 case RT_IDX_VALID: /* Promiscuous Mode frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
560 break;
561 }
562 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
563 {
564 value = RT_IDX_DST_DFLT_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
567 break;
568 }
569 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
570 {
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
574 break;
575 }
576 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
577 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000578 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
581 break;
582 }
583 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
584 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000585 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400586 RT_IDX_TYPE_NICQ | /* type */
587 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588 break;
589 }
590 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
591 {
592 value = RT_IDX_DST_RSS | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
595 break;
596 }
597 case 0: /* Clear the E-bit on an entry. */
598 {
599 value = RT_IDX_DST_DFLT_Q | /* dest */
600 RT_IDX_TYPE_NICQ | /* type */
601 (index << RT_IDX_IDX_SHIFT);/* index */
602 break;
603 }
604 default:
605 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
606 mask);
607 status = -EPERM;
608 goto exit;
609 }
610
611 if (value) {
612 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
613 if (status)
614 goto exit;
615 value |= (enable ? RT_IDX_E : 0);
616 ql_write32(qdev, RT_IDX, value);
617 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618 }
619exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400620 return status;
621}
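/*
 * Illustrative call (the actual call sites live in the up/down and
 * restart paths, not in this excerpt): enabling delivery of broadcast
 * frames to the default queue would look roughly like
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * and calling it again with enable == 0 clears the E-bit for that
 * slot.
 */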
622
623static void ql_enable_interrupts(struct ql_adapter *qdev)
624{
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
626}
627
628static void ql_disable_interrupts(struct ql_adapter *qdev)
629{
630 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
631}
632
633/* If we're running with multiple MSI-X vectors then we enable on the fly.
634 * Otherwise, we may have multiple outstanding workers and don't want to
635 * enable until the last one finishes. In this case, the irq_cnt gets
 636 * incremented every time we queue a worker and decremented every time
637 * a worker finishes. Once it hits zero we enable the interrupt.
638 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700639u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400640{
Ron Mercerbb0d2152008-10-20 10:30:26 -0700641 u32 var = 0;
642 unsigned long hw_flags = 0;
643 struct intr_context *ctx = qdev->intr_context + intr;
644
645 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
646 /* Always enable if we're MSIX multi interrupts and
647 * it's not the default (zeroeth) interrupt.
648 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400649 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700650 ctx->intr_en_mask);
651 var = ql_read32(qdev, STS);
652 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400653 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700654
655 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
656 if (atomic_dec_and_test(&ctx->irq_cnt)) {
657 ql_write32(qdev, INTR_EN,
658 ctx->intr_en_mask);
659 var = ql_read32(qdev, STS);
660 }
661 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
662 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400663}
664
665static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
666{
667 u32 var = 0;
Ron Mercerbb0d2152008-10-20 10:30:26 -0700668 struct intr_context *ctx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400669
Ron Mercerbb0d2152008-10-20 10:30:26 -0700670 /* HW disables for us if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
672 */
673 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
674 return 0;
675
676 ctx = qdev->intr_context + intr;
Ron Mercer08b1bc82009-03-09 10:59:23 +0000677 spin_lock(&qdev->hw_lock);
Ron Mercerbb0d2152008-10-20 10:30:26 -0700678 if (!atomic_read(&ctx->irq_cnt)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400679 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700680 ctx->intr_dis_mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400681 var = ql_read32(qdev, STS);
682 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700683 atomic_inc(&ctx->irq_cnt);
Ron Mercer08b1bc82009-03-09 10:59:23 +0000684 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400685 return var;
686}
687
688static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
689{
690 int i;
691 for (i = 0; i < qdev->intr_count; i++) {
 692		/* The enable call does an atomic_dec_and_test
693 * and enables only if the result is zero.
694 * So we precharge it here.
695 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700696 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
697 i == 0))
698 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400699 ql_enable_completion_interrupt(qdev, i);
700 }
701
702}
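/*
 * Note on the irq_cnt scheme: for legacy/MSI (or MSI-X vector 0) the
 * counter is pre-set to 1 above, so the ql_enable_completion_interrupt()
 * call that follows decrements it to zero and actually writes the
 * enable mask. From then on each disable bumps the count and each
 * enable drops it, so the interrupt is only re-armed once the last
 * outstanding worker has finished.
 */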
703
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000704static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
705{
706 int status, i;
707 u16 csum = 0;
708 __le16 *flash = (__le16 *)&qdev->flash;
709
710 status = strncmp((char *)&qdev->flash, str, 4);
711 if (status) {
712 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
713 return status;
714 }
715
716 for (i = 0; i < size; i++)
717 csum += le16_to_cpu(*flash++);
718
719 if (csum)
720 QPRINTK(qdev, IFUP, ERR,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722
723 return csum;
724}
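/*
 * The checksum test relies on the flash image storing a 16-bit
 * checksum word chosen so that the little-endian 16-bit sum over the
 * whole region, including that word, wraps to zero; any non-zero
 * result therefore indicates a corrupt image and is returned as the
 * error value.
 */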
725
Ron Mercer26351472009-02-02 13:53:57 -0800726static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400727{
728 int status = 0;
729 /* wait for reg to come ready */
730 status = ql_wait_reg_rdy(qdev,
731 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
732 if (status)
733 goto exit;
734 /* set up for reg read */
735 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
736 /* wait for reg to come ready */
737 status = ql_wait_reg_rdy(qdev,
738 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
739 if (status)
740 goto exit;
Ron Mercer26351472009-02-02 13:53:57 -0800741 /* This data is stored on flash as an array of
742 * __le32. Since ql_read32() returns cpu endian
743 * we need to swap it back.
744 */
745 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400746exit:
747 return status;
748}
749
Ron Mercercdca8d02009-03-02 08:07:31 +0000750static int ql_get_8000_flash_params(struct ql_adapter *qdev)
751{
752 u32 i, size;
753 int status;
754 __le32 *p = (__le32 *)&qdev->flash;
755 u32 offset;
Ron Mercer542512e2009-06-09 05:39:33 +0000756 u8 mac_addr[6];
Ron Mercercdca8d02009-03-02 08:07:31 +0000757
758 /* Get flash offset for function and adjust
759 * for dword access.
760 */
Ron Mercere4552f52009-06-09 05:39:32 +0000761 if (!qdev->port)
Ron Mercercdca8d02009-03-02 08:07:31 +0000762 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
763 else
764 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
765
766 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
767 return -ETIMEDOUT;
768
769 size = sizeof(struct flash_params_8000) / sizeof(u32);
770 for (i = 0; i < size; i++, p++) {
771 status = ql_read_flash_word(qdev, i+offset, p);
772 if (status) {
773 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
774 goto exit;
775 }
776 }
777
778 status = ql_validate_flash(qdev,
779 sizeof(struct flash_params_8000) / sizeof(u16),
780 "8000");
781 if (status) {
782 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
783 status = -EINVAL;
784 goto exit;
785 }
786
Ron Mercer542512e2009-06-09 05:39:33 +0000787 /* Extract either manufacturer or BOFM modified
788 * MAC address.
789 */
790 if (qdev->flash.flash_params_8000.data_type1 == 2)
791 memcpy(mac_addr,
792 qdev->flash.flash_params_8000.mac_addr1,
793 qdev->ndev->addr_len);
794 else
795 memcpy(mac_addr,
796 qdev->flash.flash_params_8000.mac_addr,
797 qdev->ndev->addr_len);
798
799 if (!is_valid_ether_addr(mac_addr)) {
Ron Mercercdca8d02009-03-02 08:07:31 +0000800 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
801 status = -EINVAL;
802 goto exit;
803 }
804
805 memcpy(qdev->ndev->dev_addr,
Ron Mercer542512e2009-06-09 05:39:33 +0000806 mac_addr,
Ron Mercercdca8d02009-03-02 08:07:31 +0000807 qdev->ndev->addr_len);
808
809exit:
810 ql_sem_unlock(qdev, SEM_FLASH_MASK);
811 return status;
812}
813
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000814static int ql_get_8012_flash_params(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400815{
816 int i;
817 int status;
Ron Mercer26351472009-02-02 13:53:57 -0800818 __le32 *p = (__le32 *)&qdev->flash;
Ron Mercere78f5fa2009-02-02 13:54:15 -0800819 u32 offset = 0;
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000820 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
Ron Mercere78f5fa2009-02-02 13:54:15 -0800821
822 /* Second function's parameters follow the first
823 * function's.
824 */
Ron Mercere4552f52009-06-09 05:39:32 +0000825 if (qdev->port)
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000826 offset = size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400827
828 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
829 return -ETIMEDOUT;
830
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000831 for (i = 0; i < size; i++, p++) {
Ron Mercere78f5fa2009-02-02 13:54:15 -0800832 status = ql_read_flash_word(qdev, i+offset, p);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400833 if (status) {
834 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
835 goto exit;
836 }
837
838 }
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000839
840 status = ql_validate_flash(qdev,
841 sizeof(struct flash_params_8012) / sizeof(u16),
842 "8012");
843 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
845 status = -EINVAL;
846 goto exit;
847 }
848
849 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 status = -EINVAL;
851 goto exit;
852 }
853
854 memcpy(qdev->ndev->dev_addr,
855 qdev->flash.flash_params_8012.mac_addr,
856 qdev->ndev->addr_len);
857
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400858exit:
859 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 return status;
861}
862
 863/* xgmac registers are located behind the xgmac_addr and xgmac_data
864 * register pair. Each read/write requires us to wait for the ready
865 * bit before reading/writing the data.
866 */
867static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
868{
869 int status;
870 /* wait for reg to come ready */
871 status = ql_wait_reg_rdy(qdev,
872 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
873 if (status)
874 return status;
875 /* write the data to the data reg */
876 ql_write32(qdev, XGMAC_DATA, data);
877 /* trigger the write */
878 ql_write32(qdev, XGMAC_ADDR, reg);
879 return status;
880}
881
 882/* xgmac registers are located behind the xgmac_addr and xgmac_data
883 * register pair. Each read/write requires us to wait for the ready
884 * bit before reading/writing the data.
885 */
886int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
887{
888 int status = 0;
889 /* wait for reg to come ready */
890 status = ql_wait_reg_rdy(qdev,
891 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
892 if (status)
893 goto exit;
894 /* set up for reg read */
895 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
896 /* wait for reg to come ready */
897 status = ql_wait_reg_rdy(qdev,
898 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 if (status)
900 goto exit;
901 /* get the data */
902 *data = ql_read32(qdev, XGMAC_DATA);
903exit:
904 return status;
905}
906
907/* This is used for reading the 64-bit statistics regs. */
908int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
909{
910 int status = 0;
911 u32 hi = 0;
912 u32 lo = 0;
913
914 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 if (status)
916 goto exit;
917
918 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 if (status)
920 goto exit;
921
922 *data = (u64) lo | ((u64) hi << 32);
923
924exit:
925 return status;
926}
927
Ron Mercercdca8d02009-03-02 08:07:31 +0000928static int ql_8000_port_initialize(struct ql_adapter *qdev)
929{
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +0000930 int status;
Ron Mercercfec0cb2009-06-09 05:39:29 +0000931 /*
932 * Get MPI firmware version for driver banner
 933	 * and ethtool info.
934 */
935 status = ql_mb_about_fw(qdev);
936 if (status)
937 goto exit;
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +0000938 status = ql_mb_get_fw_state(qdev);
939 if (status)
940 goto exit;
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
943exit:
944 return status;
Ron Mercercdca8d02009-03-02 08:07:31 +0000945}
946
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400947/* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
951 * later date.
952 */
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000953static int ql_8012_port_initialize(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400954{
955 int status = 0;
956 u32 data;
957
958 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
961 */
962 QPRINTK(qdev, LINK, INFO,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965 if (status) {
966 QPRINTK(qdev, LINK, CRIT,
967 "Port initialize timed out.\n");
968 }
969 return status;
970 }
971
 972	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
973 /* Set the core reset. */
974 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975 if (status)
976 goto end;
977 data |= GLOBAL_CFG_RESET;
978 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 if (status)
980 goto end;
981
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
984 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
985 data |= GLOBAL_CFG_TX_STAT_EN;
986 data |= GLOBAL_CFG_RX_STAT_EN;
987 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 if (status)
989 goto end;
990
 991	/* Enable the transmitter and clear its reset. */
992 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
993 if (status)
994 goto end;
995 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
996 data |= TX_CFG_EN; /* Enable the transmitter. */
997 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 if (status)
999 goto end;
1000
 1001	/* Enable the receiver and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1003 if (status)
1004 goto end;
1005 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1006 data |= RX_CFG_EN; /* Enable the receiver. */
1007 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 if (status)
1009 goto end;
1010
1011 /* Turn on jumbo. */
1012 status =
1013 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 if (status)
1015 goto end;
1016 status =
1017 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 if (status)
1019 goto end;
1020
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1023end:
1024 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 return status;
1026}
1027
Ron Mercer7c734352009-10-19 03:32:19 +00001028static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029{
1030 return PAGE_SIZE << qdev->lbq_buf_order;
1031}
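/*
 * Example (assuming 4 KB pages): with lbq_buf_order == 1 this is an
 * 8 KB block, which the refill logic below carves into lbq_buf_size
 * sized receive chunks (e.g. four 2 KB buffers per block).
 */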
1032
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001033/* Get the next large buffer. */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001034static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001035{
1036 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1037 rx_ring->lbq_curr_idx++;
1038 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1039 rx_ring->lbq_curr_idx = 0;
1040 rx_ring->lbq_free_cnt++;
1041 return lbq_desc;
1042}
1043
Ron Mercer7c734352009-10-19 03:32:19 +00001044static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045 struct rx_ring *rx_ring)
1046{
1047 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049 pci_dma_sync_single_for_cpu(qdev->pdev,
1050 pci_unmap_addr(lbq_desc, mapaddr),
1051 rx_ring->lbq_buf_size,
1052 PCI_DMA_FROMDEVICE);
1053
1054 /* If it's the last chunk of our master page then
1055 * we unmap it.
1056 */
1057 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058 == ql_lbq_block_size(qdev))
1059 pci_unmap_page(qdev->pdev,
1060 lbq_desc->p.pg_chunk.map,
1061 ql_lbq_block_size(qdev),
1062 PCI_DMA_FROMDEVICE);
1063 return lbq_desc;
1064}
1065
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001066/* Get the next small buffer. */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001067static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001068{
1069 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1070 rx_ring->sbq_curr_idx++;
1071 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1072 rx_ring->sbq_curr_idx = 0;
1073 rx_ring->sbq_free_cnt++;
1074 return sbq_desc;
1075}
1076
1077/* Update an rx ring index. */
1078static void ql_update_cq(struct rx_ring *rx_ring)
1079{
1080 rx_ring->cnsmr_idx++;
1081 rx_ring->curr_entry++;
1082 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1083 rx_ring->cnsmr_idx = 0;
1084 rx_ring->curr_entry = rx_ring->cq_base;
1085 }
1086}
1087
1088static void ql_write_cq_idx(struct rx_ring *rx_ring)
1089{
1090 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1091}
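/*
 * ql_update_cq() advances the software consumer index through the
 * completion ring (wrapping at cq_len), and ql_write_cq_idx() publishes
 * that index to the chip via the ring's doorbell register so the
 * hardware knows which completion entries have been consumed.
 */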
1092
Ron Mercer7c734352009-10-19 03:32:19 +00001093static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1094 struct bq_desc *lbq_desc)
1095{
1096 if (!rx_ring->pg_chunk.page) {
1097 u64 map;
1098 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099 GFP_ATOMIC,
1100 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR,
1103 "page allocation failed.\n");
1104 return -ENOMEM;
1105 }
1106 rx_ring->pg_chunk.offset = 0;
1107 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108 0, ql_lbq_block_size(qdev),
1109 PCI_DMA_FROMDEVICE);
1110 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR,
1114 "PCI mapping failed.\n");
1115 return -ENOMEM;
1116 }
1117 rx_ring->pg_chunk.map = map;
1118 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 }
1120
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1123 */
1124 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126 /* Adjust the master page chunk for next
1127 * buffer get.
1128 */
1129 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131 rx_ring->pg_chunk.page = NULL;
1132 lbq_desc->p.pg_chunk.last_flag = 1;
1133 } else {
1134 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135 get_page(rx_ring->pg_chunk.page);
1136 lbq_desc->p.pg_chunk.last_flag = 0;
1137 }
1138 return 0;
1139}
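/*
 * Summary of the scheme above: one higher-order "master" page is
 * allocated and DMA-mapped once, then handed out chunk by chunk. Every
 * chunk except the last takes an extra page reference (get_page) so
 * the page stays alive until all of its chunks are consumed, and the
 * PCI mapping is only torn down when the last chunk of the block is
 * processed (see ql_get_curr_lchunk() above).
 */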
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001140/* Process (refill) a large buffer queue. */
1141static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1142{
Ron Mercer49f21862009-02-23 10:42:16 +00001143 u32 clean_idx = rx_ring->lbq_clean_idx;
1144 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001145 struct bq_desc *lbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001146 u64 map;
1147 int i;
1148
Ron Mercer7c734352009-10-19 03:32:19 +00001149 while (rx_ring->lbq_free_cnt > 32) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001150 for (i = 0; i < 16; i++) {
1151 QPRINTK(qdev, RX_STATUS, DEBUG,
1152 "lbq: try cleaning clean_idx = %d.\n",
1153 clean_idx);
1154 lbq_desc = &rx_ring->lbq[clean_idx];
Ron Mercer7c734352009-10-19 03:32:19 +00001155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1156 QPRINTK(qdev, IFUP, ERR,
1157 "Could not get a page chunk.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001158 return;
1159 }
Ron Mercer7c734352009-10-19 03:32:19 +00001160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001163 pci_unmap_addr_set(lbq_desc, mapaddr, map);
Ron Mercer7c734352009-10-19 03:32:19 +00001164 pci_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001166 *lbq_desc->addr = cpu_to_le64(map);
Ron Mercer7c734352009-10-19 03:32:19 +00001167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001171 clean_idx++;
1172 if (clean_idx == rx_ring->lbq_len)
1173 clean_idx = 0;
1174 }
1175
1176 rx_ring->lbq_clean_idx = clean_idx;
1177 rx_ring->lbq_prod_idx += 16;
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 rx_ring->lbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001180 rx_ring->lbq_free_cnt -= 16;
1181 }
1182
1183 if (start_idx != clean_idx) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001184 QPRINTK(qdev, RX_STATUS, DEBUG,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001189 }
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
Ron Mercer49f21862009-02-23 10:42:16 +00001195 u32 clean_idx = rx_ring->sbq_clean_idx;
1196 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001197 struct bq_desc *sbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001198 u64 map;
1199 int i;
1200
1201 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = 0; i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx];
1204 QPRINTK(qdev, RX_STATUS, DEBUG,
1205 "sbq: try cleaning clean_idx = %d.\n",
1206 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001207 if (sbq_desc->p.skb == NULL) {
1208 QPRINTK(qdev, RX_STATUS, DEBUG,
1209 "sbq: getting new skb for index %d.\n",
1210 sbq_desc->index);
1211 sbq_desc->p.skb =
1212 netdev_alloc_skb(qdev->ndev,
Ron Mercer52e55f32009-10-10 09:35:07 +00001213 SMALL_BUFFER_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001214 if (sbq_desc->p.skb == NULL) {
1215 QPRINTK(qdev, PROBE, ERR,
1216 "Couldn't get an skb.\n");
1217 rx_ring->sbq_clean_idx = clean_idx;
1218 return;
1219 }
1220 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1221 map = pci_map_single(qdev->pdev,
1222 sbq_desc->p.skb->data,
Ron Mercer52e55f32009-10-10 09:35:07 +00001223 rx_ring->sbq_buf_size,
1224 PCI_DMA_FROMDEVICE);
Ron Mercerc907a352009-01-04 17:06:46 -08001225 if (pci_dma_mapping_error(qdev->pdev, map)) {
1226 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
Ron Mercer06a3d512009-02-12 16:37:48 -08001228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
Ron Mercerc907a352009-01-04 17:06:46 -08001230 return;
1231 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001232 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1233 pci_unmap_len_set(sbq_desc, maplen,
Ron Mercer52e55f32009-10-10 09:35:07 +00001234 rx_ring->sbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001235 *sbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001236 }
1237
1238 clean_idx++;
1239 if (clean_idx == rx_ring->sbq_len)
1240 clean_idx = 0;
1241 }
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001246 rx_ring->sbq_free_cnt -= 16;
1247 }
1248
1249 if (start_idx != clean_idx) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001250 QPRINTK(qdev, RX_STATUS, DEBUG,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001255 }
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1260{
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1263}
1264
1265/* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271 int i;
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1274 /*
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
 1278			 * If it's the zeroeth element, then it's
 1279			 * the skb->data area. If it's the 7th
 1280			 * element and there are more than 6 frags,
 1281			 * then it's an OAL.
1282 */
1283 if (i == 7) {
1284 QPRINTK(qdev, TX_DONE, DEBUG,
1285 "unmapping OAL area.\n");
1286 }
1287 pci_unmap_single(qdev->pdev,
1288 pci_unmap_addr(&tx_ring_desc->map[i],
1289 mapaddr),
1290 pci_unmap_len(&tx_ring_desc->map[i],
1291 maplen),
1292 PCI_DMA_TODEVICE);
1293 } else {
1294 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1295 i);
1296 pci_unmap_page(qdev->pdev,
1297 pci_unmap_addr(&tx_ring_desc->map[i],
1298 mapaddr),
1299 pci_unmap_len(&tx_ring_desc->map[i],
1300 maplen), PCI_DMA_TODEVICE);
1301 }
1302 }
1303
1304}
1305
1306/* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308 */
1309static int ql_map_send(struct ql_adapter *qdev,
1310 struct ob_mac_iocb_req *mac_iocb_ptr,
1311 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312{
1313 int len = skb_headlen(skb);
1314 dma_addr_t map;
1315 int frag_idx, err, map_idx = 0;
1316 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319 if (frag_cnt) {
1320 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1321 }
1322 /*
1323 * Map the skb buffer first.
1324 */
1325 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1326
1327 err = pci_dma_mapping_error(qdev->pdev, map);
1328 if (err) {
1329 QPRINTK(qdev, TX_QUEUED, ERR,
1330 "PCI mapping failed with error: %d\n", err);
1331
1332 return NETDEV_TX_BUSY;
1333 }
1334
1335 tbd->len = cpu_to_le32(len);
1336 tbd->addr = cpu_to_le64(map);
1337 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1338 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1339 map_idx++;
1340
1341 /*
1342 * This loop fills the remainder of the 8 address descriptors
1343 * in the IOCB. If there are more than 7 fragments, then the
1344 * eighth address desc will point to an external list (OAL).
1345 * When this happens, the remainder of the frags will be stored
1346 * in this list.
1347 */
1348 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1349 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1350 tbd++;
1351 if (frag_idx == 6 && frag_cnt > 7) {
1352 /* Let's tack on an sglist.
1353 * Our control block will now
1354 * look like this:
1355 * iocb->seg[0] = skb->data
1356 * iocb->seg[1] = frag[0]
1357 * iocb->seg[2] = frag[1]
1358 * iocb->seg[3] = frag[2]
1359 * iocb->seg[4] = frag[3]
1360 * iocb->seg[5] = frag[4]
1361 * iocb->seg[6] = frag[5]
1362 * iocb->seg[7] = ptr to OAL (external sglist)
1363 * oal->seg[0] = frag[6]
1364 * oal->seg[1] = frag[7]
1365 * oal->seg[2] = frag[8]
1366 * oal->seg[3] = frag[9]
1367 * oal->seg[4] = frag[10]
1368 * etc...
1369 */
1370 /* Tack on the OAL in the eighth segment of IOCB. */
1371 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1372 sizeof(struct oal),
1373 PCI_DMA_TODEVICE);
1374 err = pci_dma_mapping_error(qdev->pdev, map);
1375 if (err) {
1376 QPRINTK(qdev, TX_QUEUED, ERR,
1377 "PCI mapping outbound address list with error: %d\n",
1378 err);
1379 goto map_error;
1380 }
1381
1382 tbd->addr = cpu_to_le64(map);
1383 /*
1384 * The length is the number of fragments
1385 * that remain to be mapped times the length
1386 * of our sglist (OAL).
1387 */
1388 tbd->len =
1389 cpu_to_le32((sizeof(struct tx_buf_desc) *
1390 (frag_cnt - frag_idx)) | TX_DESC_C);
1391 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1392 map);
1393 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1394 sizeof(struct oal));
1395 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1396 map_idx++;
1397 }
1398
1399 map =
1400 pci_map_page(qdev->pdev, frag->page,
1401 frag->page_offset, frag->size,
1402 PCI_DMA_TODEVICE);
1403
1404 err = pci_dma_mapping_error(qdev->pdev, map);
1405 if (err) {
1406 QPRINTK(qdev, TX_QUEUED, ERR,
1407 "PCI mapping frags failed with error: %d.\n",
1408 err);
1409 goto map_error;
1410 }
1411
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(frag->size);
1414 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 frag->size);
1417
1418 }
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1424
1425map_error:
1426 /*
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
 1430	 * so they can be unmapped.
1431 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1434}
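/*
 * Concrete example of the mapping scheme above (numbers only, no new
 * logic): for an skb with 10 fragments, ql_map_send() creates 12 DMA
 * mappings, the head (skb->data), the OAL itself and the 10 frags.
 * The IOCB carries the head in seg[0], frags 0-5 in seg[1]-seg[6] and
 * the OAL's bus address in seg[7]; frags 6-9 land in oal->seg[0]-seg[3],
 * with the final descriptor flagged TX_DESC_E to terminate the list.
 */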
1435
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001436static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001437{
1438 void *temp_addr = skb->data;
1439
1440 /* Undo the skb_reserve(skb,32) we did before
1441 * giving to hardware, and realign data on
1442 * a 2-byte boundary.
1443 */
1444 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1445 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1446 skb_copy_to_linear_data(skb, temp_addr,
1447 (unsigned int)len);
1448}
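/*
 * What this does, in short (QLGE_SB_PAD and NET_IP_ALIGN are
 * driver/arch constants): the small rx buffer was handed to the chip
 * QLGE_SB_PAD bytes into the skb (see the skb_reserve() in
 * ql_update_sbq()). After receive, data and tail are pulled back by
 * QLGE_SB_PAD - NET_IP_ALIGN and the frame is copied into place, so
 * the packet ends up at the conventional NET_IP_ALIGN offset and the
 * IP header is properly aligned without allocating a new skb.
 */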
1449
1450/*
1451 * This function builds an skb for the given inbound
1452 * completion. It will be rewritten for readability in the near
 1453 * future, but for now it works well.
1454 */
1455static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1456 struct rx_ring *rx_ring,
1457 struct ib_mac_iocb_rsp *ib_mac_rsp)
1458{
1459 struct bq_desc *lbq_desc;
1460 struct bq_desc *sbq_desc;
1461 struct sk_buff *skb = NULL;
1462 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1463 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1464
1465 /*
1466 * Handle the header buffer if present.
1467 */
1468 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1469 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1470 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1471 /*
1472 * Headers fit nicely into a small buffer.
1473 */
1474 sbq_desc = ql_get_curr_sbuf(rx_ring);
1475 pci_unmap_single(qdev->pdev,
1476 pci_unmap_addr(sbq_desc, mapaddr),
1477 pci_unmap_len(sbq_desc, maplen),
1478 PCI_DMA_FROMDEVICE);
1479 skb = sbq_desc->p.skb;
1480 ql_realign_skb(skb, hdr_len);
1481 skb_put(skb, hdr_len);
1482 sbq_desc->p.skb = NULL;
1483 }
1484
1485 /*
1486 * Handle the data buffer(s).
1487 */
1488 if (unlikely(!length)) { /* Is there data too? */
1489 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "No Data buffer in this packet.\n");
1491 return skb;
1492 }
1493
1494 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1495 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1496 QPRINTK(qdev, RX_STATUS, DEBUG,
1497 "Headers in small, data of %d bytes in small, combine them.\n", length);
1498 /*
1499 * Data is less than small buffer size so it's
1500 * stuffed in a small buffer.
1501 * For this case we append the data
1502 * from the "data" small buffer to the "header" small
1503 * buffer.
1504 */
1505 sbq_desc = ql_get_curr_sbuf(rx_ring);
1506 pci_dma_sync_single_for_cpu(qdev->pdev,
1507 pci_unmap_addr
1508 (sbq_desc, mapaddr),
1509 pci_unmap_len
1510 (sbq_desc, maplen),
1511 PCI_DMA_FROMDEVICE);
1512 memcpy(skb_put(skb, length),
1513 sbq_desc->p.skb->data, length);
1514 pci_dma_sync_single_for_device(qdev->pdev,
1515 pci_unmap_addr
1516 (sbq_desc,
1517 mapaddr),
1518 pci_unmap_len
1519 (sbq_desc,
1520 maplen),
1521 PCI_DMA_FROMDEVICE);
1522 } else {
1523 QPRINTK(qdev, RX_STATUS, DEBUG,
1524 "%d bytes in a single small buffer.\n", length);
1525 sbq_desc = ql_get_curr_sbuf(rx_ring);
1526 skb = sbq_desc->p.skb;
1527 ql_realign_skb(skb, length);
1528 skb_put(skb, length);
1529 pci_unmap_single(qdev->pdev,
1530 pci_unmap_addr(sbq_desc,
1531 mapaddr),
1532 pci_unmap_len(sbq_desc,
1533 maplen),
1534 PCI_DMA_FROMDEVICE);
1535 sbq_desc->p.skb = NULL;
1536 }
1537 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1538 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1539 QPRINTK(qdev, RX_STATUS, DEBUG,
1540 "Header in small, %d bytes in large. Chain large to small!\n", length);
1541 /*
1542 * The data is in a single large buffer. We
1543 * chain it to the header buffer's skb and let
1544 * it rip.
1545 */
Ron Mercer7c734352009-10-19 03:32:19 +00001546 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001547 QPRINTK(qdev, RX_STATUS, DEBUG,
Ron Mercer7c734352009-10-19 03:32:19 +00001548 "Chaining page at offset = %d,"
1549 "for %d bytes to skb.\n",
1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset,
1553 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001554 skb->len += length;
1555 skb->data_len += length;
1556 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001557 } else {
1558 /*
 1559			 * The headers and data are in a single large buffer. We
 1560			 * chain it to a new skb and pull the headers. This can happen with
1561 * jumbo mtu on a non-TCP/UDP frame.
1562 */
Ron Mercer7c734352009-10-19 03:32:19 +00001563 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001564 skb = netdev_alloc_skb(qdev->ndev, length);
1565 if (skb == NULL) {
1566 QPRINTK(qdev, PROBE, DEBUG,
1567 "No skb available, drop the packet.\n");
1568 return NULL;
1569 }
Ron Mercer4055c7d2009-01-04 17:07:09 -08001570 pci_unmap_page(qdev->pdev,
1571 pci_unmap_addr(lbq_desc,
1572 mapaddr),
1573 pci_unmap_len(lbq_desc, maplen),
1574 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001575 skb_reserve(skb, NET_IP_ALIGN);
1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
Ron Mercer7c734352009-10-19 03:32:19 +00001578 skb_fill_page_desc(skb, 0,
1579 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset,
1581 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001582 skb->len += length;
1583 skb->data_len += length;
1584 skb->truesize += length;
1585 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001586 __pskb_pull_tail(skb,
1587 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1588 VLAN_ETH_HLEN : ETH_HLEN);
1589 }
1590 } else {
1591 /*
1592 * The data is in a chain of large buffers
1593 * pointed to by a small buffer. We loop
 1594		 * through and chain them to our small header
1595 * buffer's skb.
1596 * frags: There are 18 max frags and our small
1597 * buffer will hold 32 of them. The thing is,
1598 * we'll use 3 max for our 9000 byte jumbo
1599 * frames. If the MTU goes up we could
1600 * eventually be in trouble.
1601 */
Ron Mercer7c734352009-10-19 03:32:19 +00001602 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001603 sbq_desc = ql_get_curr_sbuf(rx_ring);
1604 pci_unmap_single(qdev->pdev,
1605 pci_unmap_addr(sbq_desc, mapaddr),
1606 pci_unmap_len(sbq_desc, maplen),
1607 PCI_DMA_FROMDEVICE);
1608 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1609 /*
 1610			 * This is a non-TCP/UDP IP frame, so
1611 * the headers aren't split into a small
1612 * buffer. We have to use the small buffer
1613 * that contains our sg list as our skb to
1614 * send upstairs. Copy the sg list here to
1615 * a local buffer and use it to find the
1616 * pages to chain.
1617 */
1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1619 "%d bytes of headers & data in chain of large.\n", length);
1620 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001621 sbq_desc->p.skb = NULL;
1622 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001623 }
1624 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001625 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1626 size = (length < rx_ring->lbq_buf_size) ? length :
1627 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001628
1629 QPRINTK(qdev, RX_STATUS, DEBUG,
1630 "Adding page %d to skb for %d bytes.\n",
1631 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001632 skb_fill_page_desc(skb, i,
1633 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset,
1635 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001636 skb->len += size;
1637 skb->data_len += size;
1638 skb->truesize += size;
1639 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001640 i++;
1641 }
1642 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1643 VLAN_ETH_HLEN : ETH_HLEN);
1644 }
1645 return skb;
1646}
1647
1648/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp)
1652{
1653 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL;
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001655 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001657
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659
1660 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1661 if (unlikely(!skb)) {
1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1663 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001664 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001665 return;
1666 }
1667
Ron Mercera32959c2009-06-09 05:39:27 +00001668 /* Frame error, so drop the packet. */
1669 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1670 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1671 ib_mac_rsp->flags2);
1672 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001673 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001674 return;
1675 }
Ron Mercerec33a492009-06-09 05:39:28 +00001676
1677 /* The max framesize filter on this chip is set higher than
1678 * MTU since FCoE uses 2k frames.
1679 */
1680 if (skb->len > ndev->mtu + ETH_HLEN) {
1681 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001682 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001683 return;
1684 }
1685
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001686 /* loopback self test for ethtool */
1687 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1688 ql_check_lb_frame(qdev, skb);
1689 dev_kfree_skb_any(skb);
1690 return;
1691 }
1692
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001693 prefetch(skb->data);
1694 skb->dev = ndev;
1695 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1696 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1697 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1698 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1699 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1700 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1701 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1702 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001703 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001704 }
1705 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1706 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1707 }
Ron Mercerd555f592009-03-09 10:59:19 +00001708
Ron Mercerd555f592009-03-09 10:59:19 +00001709 skb->protocol = eth_type_trans(skb, ndev);
1710 skb->ip_summed = CHECKSUM_NONE;
1711
1712 /* If rx checksum is on, and there are no
1713 * csum or frame errors.
1714 */
1715 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001716 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1717 /* TCP frame. */
1718 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1719 QPRINTK(qdev, RX_STATUS, DEBUG,
1720 "TCP checksum done!\n");
1721 skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1723 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1724 /* Unfragmented ipv4 UDP frame. */
1725 struct iphdr *iph = (struct iphdr *) skb->data;
1726 if (!(iph->frag_off &
1727 cpu_to_be16(IP_MF|IP_OFFSET))) {
1728 skb->ip_summed = CHECKSUM_UNNECESSARY;
1729 QPRINTK(qdev, RX_STATUS, DEBUG,
1730 "TCP checksum done!\n");
1731 }
1732 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001733 }
Ron Mercerd555f592009-03-09 10:59:19 +00001734
Ron Mercer885ee392009-11-03 13:49:31 +00001735 rx_ring->rx_packets++;
1736 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001737 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001738 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1739 if (qdev->vlgrp &&
1740 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1741 (vlan_id != 0))
1742 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1743 vlan_id, skb);
1744 else
1745 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001746 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001747 if (qdev->vlgrp &&
1748 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1749 (vlan_id != 0))
1750 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1751 else
1752 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001753 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001754}
1755
1756/* Process an outbound completion from an rx ring. */
1757static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1758 struct ob_mac_iocb_rsp *mac_rsp)
1759{
1760 struct tx_ring *tx_ring;
1761 struct tx_ring_desc *tx_ring_desc;
1762
1763 QL_DUMP_OB_MAC_RSP(mac_rsp);
1764 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1765 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1766 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00001767 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
1768 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001769 dev_kfree_skb(tx_ring_desc->skb);
1770 tx_ring_desc->skb = NULL;
1771
1772 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1773 OB_MAC_IOCB_RSP_S |
1774 OB_MAC_IOCB_RSP_L |
1775 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1776 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1777 QPRINTK(qdev, TX_DONE, WARNING,
1778 "Total descriptor length did not match transfer length.\n");
1779 }
1780 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1781 QPRINTK(qdev, TX_DONE, WARNING,
1782 "Frame too short to be legal, not sent.\n");
1783 }
1784 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1785 QPRINTK(qdev, TX_DONE, WARNING,
1786 "Frame too long, but sent anyway.\n");
1787 }
1788 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1789 QPRINTK(qdev, TX_DONE, WARNING,
1790 "PCI backplane error. Frame not sent.\n");
1791 }
1792 }
1793 atomic_inc(&tx_ring->tx_count);
1794}
1795
1796/* Fire up a handler to reset the MPI processor. */
1797void ql_queue_fw_error(struct ql_adapter *qdev)
1798{
Ron Mercer6a473302009-07-02 06:06:12 +00001799 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001800 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1801}
1802
1803void ql_queue_asic_error(struct ql_adapter *qdev)
1804{
Ron Mercer6a473302009-07-02 06:06:12 +00001805 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001806 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08001807 /* Clear adapter up bit to signal the recovery
1808 * process that it shouldn't kill the reset worker
1809 * thread
1810 */
1811 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001812 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1813}
1814
1815static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1816 struct ib_ae_iocb_rsp *ib_ae_rsp)
1817{
1818 switch (ib_ae_rsp->event) {
1819 case MGMT_ERR_EVENT:
1820 QPRINTK(qdev, RX_ERR, ERR,
1821 "Management Processor Fatal Error.\n");
1822 ql_queue_fw_error(qdev);
1823 return;
1824
1825 case CAM_LOOKUP_ERR_EVENT:
1826 QPRINTK(qdev, LINK, ERR,
1827 "Multiple CAM hits lookup occurred.\n");
1828 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1829 ql_queue_asic_error(qdev);
1830 return;
1831
1832 case SOFT_ECC_ERROR_EVENT:
1833 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1834 ql_queue_asic_error(qdev);
1835 break;
1836
1837 case PCI_ERR_ANON_BUF_RD:
1838 QPRINTK(qdev, RX_ERR, ERR,
1839 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1840 ib_ae_rsp->q_id);
1841 ql_queue_asic_error(qdev);
1842 break;
1843
1844 default:
1845 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1846 ib_ae_rsp->event);
1847 ql_queue_asic_error(qdev);
1848 break;
1849 }
1850}
1851
1852static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1853{
1854 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001855 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001856 struct ob_mac_iocb_rsp *net_rsp = NULL;
1857 int count = 0;
1858
Ron Mercer1e213302009-03-09 10:59:21 +00001859 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001860 /* While there are entries in the completion queue. */
1861 while (prod != rx_ring->cnsmr_idx) {
1862
1863 QPRINTK(qdev, RX_STATUS, DEBUG,
1864 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1865 prod, rx_ring->cnsmr_idx);
1866
1867 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1868 rmb();
1869 switch (net_rsp->opcode) {
1870
1871 case OPCODE_OB_MAC_TSO_IOCB:
1872 case OPCODE_OB_MAC_IOCB:
1873 ql_process_mac_tx_intr(qdev, net_rsp);
1874 break;
1875 default:
1876 QPRINTK(qdev, RX_STATUS, DEBUG,
1877 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1878 net_rsp->opcode);
1879 }
1880 count++;
1881 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001882 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001883 }
1884 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00001885	tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
 1886	if (net_rsp != NULL &&
 1887	    __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001888 if (atomic_read(&tx_ring->queue_stopped) &&
1889 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1890 /*
1891 * The queue got stopped because the tx_ring was full.
1892 * Wake it up, because it's now at least 25% empty.
1893 */
Ron Mercer1e213302009-03-09 10:59:21 +00001894 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001895 }
1896
1897 return count;
1898}
1899
1900static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1901{
1902 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001903 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001904 struct ql_net_rsp_iocb *net_rsp;
1905 int count = 0;
1906
1907 /* While there are entries in the completion queue. */
1908 while (prod != rx_ring->cnsmr_idx) {
1909
1910 QPRINTK(qdev, RX_STATUS, DEBUG,
1911 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1912 prod, rx_ring->cnsmr_idx);
1913
1914 net_rsp = rx_ring->curr_entry;
1915 rmb();
1916 switch (net_rsp->opcode) {
1917 case OPCODE_IB_MAC_IOCB:
1918 ql_process_mac_rx_intr(qdev, rx_ring,
1919 (struct ib_mac_iocb_rsp *)
1920 net_rsp);
1921 break;
1922
1923 case OPCODE_IB_AE_IOCB:
1924 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1925 net_rsp);
1926 break;
1927 default:
1928 {
1929 QPRINTK(qdev, RX_STATUS, DEBUG,
1930 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1931 net_rsp->opcode);
1932 }
1933 }
1934 count++;
1935 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001936 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001937 if (count == budget)
1938 break;
1939 }
1940 ql_update_buffer_queues(qdev, rx_ring);
1941 ql_write_cq_idx(rx_ring);
1942 return count;
1943}
1944
1945static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1946{
1947 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1948 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00001949 struct rx_ring *trx_ring;
1950 int i, work_done = 0;
1951 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001952
1953 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1954 rx_ring->cq_id);
1955
Ron Mercer39aa8162009-08-27 11:02:11 +00001956 /* Service the TX rings first. They start
1957 * right after the RSS rings. */
1958 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1959 trx_ring = &qdev->rx_ring[i];
1960 /* If this TX completion ring belongs to this vector and
1961 * it's not empty then service it.
1962 */
1963 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1964 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1965 trx_ring->cnsmr_idx)) {
1966 QPRINTK(qdev, INTR, DEBUG,
1967 "%s: Servicing TX completion ring %d.\n",
1968 __func__, trx_ring->cq_id);
1969 ql_clean_outbound_rx_ring(trx_ring);
1970 }
1971 }
1972
1973 /*
1974 * Now service the RSS ring if it's active.
1975 */
1976 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1977 rx_ring->cnsmr_idx) {
1978 QPRINTK(qdev, INTR, DEBUG,
1979 "%s: Servicing RX completion ring %d.\n",
1980 __func__, rx_ring->cq_id);
1981 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1982 }
1983
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001984 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001985 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001986 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1987 }
1988 return work_done;
1989}
1990
Ron Mercer01e6b952009-10-30 12:13:34 +00001991static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001992{
1993 struct ql_adapter *qdev = netdev_priv(ndev);
1994
1995 qdev->vlgrp = grp;
1996 if (grp) {
1997 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1998 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1999 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2000 } else {
2001 QPRINTK(qdev, IFUP, DEBUG,
2002 "Turning off VLAN in NIC_RCV_CFG.\n");
2003 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2004 }
2005}
2006
Ron Mercer01e6b952009-10-30 12:13:34 +00002007static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002008{
2009 struct ql_adapter *qdev = netdev_priv(ndev);
2010 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002011 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002012
Ron Mercercc288f52009-02-23 10:42:14 +00002013 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2014 if (status)
2015 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002016 if (ql_set_mac_addr_reg
2017 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2018 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2019 }
Ron Mercercc288f52009-02-23 10:42:14 +00002020 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002021}
2022
Ron Mercer01e6b952009-10-30 12:13:34 +00002023static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002024{
2025 struct ql_adapter *qdev = netdev_priv(ndev);
2026 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002027 int status;
2028
2029 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2030 if (status)
2031 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002032
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002033 if (ql_set_mac_addr_reg
2034 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2035 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2036 }
Ron Mercercc288f52009-02-23 10:42:14 +00002037 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002038
2039}
2040
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002041/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2042static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2043{
2044 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002045 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002046 return IRQ_HANDLED;
2047}
2048
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002049/* This handles a fatal error, MPI activity, and the default
2050 * rx_ring in an MSI-X multiple vector environment.
 2051 * In MSI/Legacy environment it also processes the rest of
2052 * the rx_rings.
2053 */
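/* Editorial sketch of the dispatch order implemented below: first the
 * shared-interrupt ownership check via irq_cnt, then the fatal-error
 * (STS_FE) and MPI (STS_PI) status bits, and finally the ISR1 bit-mask
 * tested against this vector's irq_mask to decide whether to schedule
 * NAPI for rx_ring[0].
 */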
2054static irqreturn_t qlge_isr(int irq, void *dev_id)
2055{
2056 struct rx_ring *rx_ring = dev_id;
2057 struct ql_adapter *qdev = rx_ring->qdev;
2058 struct intr_context *intr_context = &qdev->intr_context[0];
2059 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002060 int work_done = 0;
2061
Ron Mercerbb0d2152008-10-20 10:30:26 -07002062 spin_lock(&qdev->hw_lock);
2063 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2064 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2065 spin_unlock(&qdev->hw_lock);
2066 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002067 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002068 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002069
Ron Mercerbb0d2152008-10-20 10:30:26 -07002070 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002071
2072 /*
2073 * Check for fatal error.
2074 */
2075 if (var & STS_FE) {
2076 ql_queue_asic_error(qdev);
2077 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2078 var = ql_read32(qdev, ERR_STS);
2079 QPRINTK(qdev, INTR, ERR,
2080 "Resetting chip. Error Status Register = 0x%x\n", var);
2081 return IRQ_HANDLED;
2082 }
2083
2084 /*
2085 * Check MPI processor activity.
2086 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002087 if ((var & STS_PI) &&
2088 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002089 /*
2090 * We've got an async event or mailbox completion.
2091 * Handle it and clear the source of the interrupt.
2092 */
2093 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2094 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002095 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2096 queue_delayed_work_on(smp_processor_id(),
2097 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002098 work_done++;
2099 }
2100
2101 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002102 * Get the bit-mask that shows the active queues for this
2103 * pass. Compare it to the queues that this irq services
2104 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002105 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002106 var = ql_read32(qdev, ISR1);
2107 if (var & intr_context->irq_mask) {
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002108 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002109 "Waking handler for rx_ring[0].\n");
2110 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002111 napi_schedule(&rx_ring->napi);
2112 work_done++;
2113 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002114 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002115 return work_done ? IRQ_HANDLED : IRQ_NONE;
2116}
2117
2118static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2119{
2120
2121 if (skb_is_gso(skb)) {
2122 int err;
2123 if (skb_header_cloned(skb)) {
2124 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2125 if (err)
2126 return err;
2127 }
2128
2129 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2130 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2131 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2132 mac_iocb_ptr->total_hdrs_len =
2133 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2134 mac_iocb_ptr->net_trans_offset =
2135 cpu_to_le16(skb_network_offset(skb) |
2136 skb_transport_offset(skb)
2137 << OB_MAC_TRANSPORT_HDR_SHIFT);
2138 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2139 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2140 if (likely(skb->protocol == htons(ETH_P_IP))) {
2141 struct iphdr *iph = ip_hdr(skb);
2142 iph->check = 0;
2143 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2144 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2145 iph->daddr, 0,
2146 IPPROTO_TCP,
2147 0);
2148 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2149 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2150 tcp_hdr(skb)->check =
2151 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2152 &ipv6_hdr(skb)->daddr,
2153 0, IPPROTO_TCP, 0);
2154 }
2155 return 1;
2156 }
2157 return 0;
2158}
2159
2160static void ql_hw_csum_setup(struct sk_buff *skb,
2161 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2162{
2163 int len;
2164 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002165 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2167 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2168 mac_iocb_ptr->net_trans_offset =
2169 cpu_to_le16(skb_network_offset(skb) |
2170 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2171
2172 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2173 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2174 if (likely(iph->protocol == IPPROTO_TCP)) {
2175 check = &(tcp_hdr(skb)->check);
2176 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2177 mac_iocb_ptr->total_hdrs_len =
2178 cpu_to_le16(skb_transport_offset(skb) +
2179 (tcp_hdr(skb)->doff << 2));
2180 } else {
2181 check = &(udp_hdr(skb)->check);
2182 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2183 mac_iocb_ptr->total_hdrs_len =
2184 cpu_to_le16(skb_transport_offset(skb) +
2185 sizeof(struct udphdr));
2186 }
2187 *check = ~csum_tcpudp_magic(iph->saddr,
2188 iph->daddr, len, iph->protocol, 0);
2189}
2190
Stephen Hemminger613573252009-08-31 19:50:58 +00002191static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002192{
2193 struct tx_ring_desc *tx_ring_desc;
2194 struct ob_mac_iocb_req *mac_iocb_ptr;
2195 struct ql_adapter *qdev = netdev_priv(ndev);
2196 int tso;
2197 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002198 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199
2200 tx_ring = &qdev->tx_ring[tx_ring_idx];
2201
Ron Mercer74c50b42009-03-09 10:59:27 +00002202 if (skb_padto(skb, ETH_ZLEN))
2203 return NETDEV_TX_OK;
2204
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002205 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2206 QPRINTK(qdev, TX_QUEUED, INFO,
2207 "%s: shutting down tx queue %d du to lack of resources.\n",
2208 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002209 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002210 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002211 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002212 return NETDEV_TX_BUSY;
2213 }
2214 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2215 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002216 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002217
2218 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2219 mac_iocb_ptr->tid = tx_ring_desc->index;
2220 /* We use the upper 32-bits to store the tx queue for this IO.
2221 * When we get the completion we can use it to establish the context.
2222 */
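	/* Editorial note: the completion handler above, ql_process_mac_tx_intr(),
	 * reads txq_idx back out of the OB IOCB response to locate this
	 * tx_ring and its tx_ring_desc, so the value stored here must match
	 * the index used to pick tx_ring at the top of this function.
	 */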
2223 mac_iocb_ptr->txq_idx = tx_ring_idx;
2224 tx_ring_desc->skb = skb;
2225
2226 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2227
2228 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2229 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2230 vlan_tx_tag_get(skb));
2231 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2232 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2233 }
2234 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2235 if (tso < 0) {
2236 dev_kfree_skb_any(skb);
2237 return NETDEV_TX_OK;
2238 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2239 ql_hw_csum_setup(skb,
2240 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2241 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002242 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2243 NETDEV_TX_OK) {
2244 QPRINTK(qdev, TX_QUEUED, ERR,
2245 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002246 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002247 return NETDEV_TX_BUSY;
2248 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002249 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2250 tx_ring->prod_idx++;
2251 if (tx_ring->prod_idx == tx_ring->wq_len)
2252 tx_ring->prod_idx = 0;
2253 wmb();
2254
2255 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002256 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2257 tx_ring->prod_idx, skb->len);
2258
2259 atomic_dec(&tx_ring->tx_count);
2260 return NETDEV_TX_OK;
2261}
2262
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002263
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002264static void ql_free_shadow_space(struct ql_adapter *qdev)
2265{
2266 if (qdev->rx_ring_shadow_reg_area) {
2267 pci_free_consistent(qdev->pdev,
2268 PAGE_SIZE,
2269 qdev->rx_ring_shadow_reg_area,
2270 qdev->rx_ring_shadow_reg_dma);
2271 qdev->rx_ring_shadow_reg_area = NULL;
2272 }
2273 if (qdev->tx_ring_shadow_reg_area) {
2274 pci_free_consistent(qdev->pdev,
2275 PAGE_SIZE,
2276 qdev->tx_ring_shadow_reg_area,
2277 qdev->tx_ring_shadow_reg_dma);
2278 qdev->tx_ring_shadow_reg_area = NULL;
2279 }
2280}
2281
2282static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2283{
2284 qdev->rx_ring_shadow_reg_area =
2285 pci_alloc_consistent(qdev->pdev,
2286 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2287 if (qdev->rx_ring_shadow_reg_area == NULL) {
2288 QPRINTK(qdev, IFUP, ERR,
2289 "Allocation of RX shadow space failed.\n");
2290 return -ENOMEM;
2291 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002292 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002293 qdev->tx_ring_shadow_reg_area =
2294 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2295 &qdev->tx_ring_shadow_reg_dma);
2296 if (qdev->tx_ring_shadow_reg_area == NULL) {
2297 QPRINTK(qdev, IFUP, ERR,
2298 "Allocation of TX shadow space failed.\n");
2299 goto err_wqp_sh_area;
2300 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002301 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002302 return 0;
2303
2304err_wqp_sh_area:
2305 pci_free_consistent(qdev->pdev,
2306 PAGE_SIZE,
2307 qdev->rx_ring_shadow_reg_area,
2308 qdev->rx_ring_shadow_reg_dma);
2309 return -ENOMEM;
2310}
2311
2312static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2313{
2314 struct tx_ring_desc *tx_ring_desc;
2315 int i;
2316 struct ob_mac_iocb_req *mac_iocb_ptr;
2317
2318 mac_iocb_ptr = tx_ring->wq_base;
2319 tx_ring_desc = tx_ring->q;
2320 for (i = 0; i < tx_ring->wq_len; i++) {
2321 tx_ring_desc->index = i;
2322 tx_ring_desc->skb = NULL;
2323 tx_ring_desc->queue_entry = mac_iocb_ptr;
2324 mac_iocb_ptr++;
2325 tx_ring_desc++;
2326 }
2327 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2328 atomic_set(&tx_ring->queue_stopped, 0);
2329}
2330
2331static void ql_free_tx_resources(struct ql_adapter *qdev,
2332 struct tx_ring *tx_ring)
2333{
2334 if (tx_ring->wq_base) {
2335 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2336 tx_ring->wq_base, tx_ring->wq_base_dma);
2337 tx_ring->wq_base = NULL;
2338 }
2339 kfree(tx_ring->q);
2340 tx_ring->q = NULL;
2341}
2342
2343static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2344 struct tx_ring *tx_ring)
2345{
2346 tx_ring->wq_base =
2347 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2348 &tx_ring->wq_base_dma);
2349
Joe Perches8e95a202009-12-03 07:58:21 +00002350 if ((tx_ring->wq_base == NULL) ||
2351 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002352 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2353 return -ENOMEM;
2354 }
2355 tx_ring->q =
2356 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2357 if (tx_ring->q == NULL)
2358 goto err;
2359
2360 return 0;
2361err:
2362 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2363 tx_ring->wq_base, tx_ring->wq_base_dma);
2364 return -ENOMEM;
2365}
2366
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002367static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002368{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002369 struct bq_desc *lbq_desc;
2370
Ron Mercer7c734352009-10-19 03:32:19 +00002371 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002372
Ron Mercer7c734352009-10-19 03:32:19 +00002373 curr_idx = rx_ring->lbq_curr_idx;
2374 clean_idx = rx_ring->lbq_clean_idx;
2375 while (curr_idx != clean_idx) {
2376 lbq_desc = &rx_ring->lbq[curr_idx];
2377
2378 if (lbq_desc->p.pg_chunk.last_flag) {
2379 pci_unmap_page(qdev->pdev,
2380 lbq_desc->p.pg_chunk.map,
2381 ql_lbq_block_size(qdev),
2382 PCI_DMA_FROMDEVICE);
2383 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384 }
Ron Mercer7c734352009-10-19 03:32:19 +00002385
2386 put_page(lbq_desc->p.pg_chunk.page);
2387 lbq_desc->p.pg_chunk.page = NULL;
2388
2389 if (++curr_idx == rx_ring->lbq_len)
2390 curr_idx = 0;
2391
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002392 }
2393}
2394
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002395static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002396{
2397 int i;
2398 struct bq_desc *sbq_desc;
2399
2400 for (i = 0; i < rx_ring->sbq_len; i++) {
2401 sbq_desc = &rx_ring->sbq[i];
2402 if (sbq_desc == NULL) {
2403 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2404 return;
2405 }
2406 if (sbq_desc->p.skb) {
2407 pci_unmap_single(qdev->pdev,
2408 pci_unmap_addr(sbq_desc, mapaddr),
2409 pci_unmap_len(sbq_desc, maplen),
2410 PCI_DMA_FROMDEVICE);
2411 dev_kfree_skb(sbq_desc->p.skb);
2412 sbq_desc->p.skb = NULL;
2413 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002414 }
2415}
2416
Ron Mercer4545a3f2009-02-23 10:42:17 +00002417/* Free all large and small rx buffers associated
2418 * with the completion queues for this device.
2419 */
2420static void ql_free_rx_buffers(struct ql_adapter *qdev)
2421{
2422 int i;
2423 struct rx_ring *rx_ring;
2424
2425 for (i = 0; i < qdev->rx_ring_count; i++) {
2426 rx_ring = &qdev->rx_ring[i];
2427 if (rx_ring->lbq)
2428 ql_free_lbq_buffers(qdev, rx_ring);
2429 if (rx_ring->sbq)
2430 ql_free_sbq_buffers(qdev, rx_ring);
2431 }
2432}
2433
2434static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2435{
2436 struct rx_ring *rx_ring;
2437 int i;
2438
2439 for (i = 0; i < qdev->rx_ring_count; i++) {
2440 rx_ring = &qdev->rx_ring[i];
2441 if (rx_ring->type != TX_Q)
2442 ql_update_buffer_queues(qdev, rx_ring);
2443 }
2444}
2445
2446static void ql_init_lbq_ring(struct ql_adapter *qdev,
2447 struct rx_ring *rx_ring)
2448{
2449 int i;
2450 struct bq_desc *lbq_desc;
2451 __le64 *bq = rx_ring->lbq_base;
2452
2453 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2454 for (i = 0; i < rx_ring->lbq_len; i++) {
2455 lbq_desc = &rx_ring->lbq[i];
2456 memset(lbq_desc, 0, sizeof(*lbq_desc));
2457 lbq_desc->index = i;
2458 lbq_desc->addr = bq;
2459 bq++;
2460 }
2461}
2462
2463static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002464 struct rx_ring *rx_ring)
2465{
2466 int i;
2467 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002468 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002469
Ron Mercer4545a3f2009-02-23 10:42:17 +00002470 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002471 for (i = 0; i < rx_ring->sbq_len; i++) {
2472 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002473 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002474 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002475 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002476 bq++;
2477 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002478}
2479
2480static void ql_free_rx_resources(struct ql_adapter *qdev,
2481 struct rx_ring *rx_ring)
2482{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002483 /* Free the small buffer queue. */
2484 if (rx_ring->sbq_base) {
2485 pci_free_consistent(qdev->pdev,
2486 rx_ring->sbq_size,
2487 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2488 rx_ring->sbq_base = NULL;
2489 }
2490
2491 /* Free the small buffer queue control blocks. */
2492 kfree(rx_ring->sbq);
2493 rx_ring->sbq = NULL;
2494
2495 /* Free the large buffer queue. */
2496 if (rx_ring->lbq_base) {
2497 pci_free_consistent(qdev->pdev,
2498 rx_ring->lbq_size,
2499 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2500 rx_ring->lbq_base = NULL;
2501 }
2502
2503 /* Free the large buffer queue control blocks. */
2504 kfree(rx_ring->lbq);
2505 rx_ring->lbq = NULL;
2506
2507 /* Free the rx queue. */
2508 if (rx_ring->cq_base) {
2509 pci_free_consistent(qdev->pdev,
2510 rx_ring->cq_size,
2511 rx_ring->cq_base, rx_ring->cq_base_dma);
2512 rx_ring->cq_base = NULL;
2513 }
2514}
2515
2516/* Allocate queues and buffers for this completion queue based
2517 * on the values in the parameter structure. */
2518static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2519 struct rx_ring *rx_ring)
2520{
2521
2522 /*
2523 * Allocate the completion queue for this rx_ring.
2524 */
2525 rx_ring->cq_base =
2526 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2527 &rx_ring->cq_base_dma);
2528
2529 if (rx_ring->cq_base == NULL) {
2530 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2531 return -ENOMEM;
2532 }
2533
2534 if (rx_ring->sbq_len) {
2535 /*
2536 * Allocate small buffer queue.
2537 */
2538 rx_ring->sbq_base =
2539 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2540 &rx_ring->sbq_base_dma);
2541
2542 if (rx_ring->sbq_base == NULL) {
2543 QPRINTK(qdev, IFUP, ERR,
2544 "Small buffer queue allocation failed.\n");
2545 goto err_mem;
2546 }
2547
2548 /*
2549 * Allocate small buffer queue control blocks.
2550 */
2551 rx_ring->sbq =
2552 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2553 GFP_KERNEL);
2554 if (rx_ring->sbq == NULL) {
2555 QPRINTK(qdev, IFUP, ERR,
2556 "Small buffer queue control block allocation failed.\n");
2557 goto err_mem;
2558 }
2559
Ron Mercer4545a3f2009-02-23 10:42:17 +00002560 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002561 }
2562
2563 if (rx_ring->lbq_len) {
2564 /*
2565 * Allocate large buffer queue.
2566 */
2567 rx_ring->lbq_base =
2568 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2569 &rx_ring->lbq_base_dma);
2570
2571 if (rx_ring->lbq_base == NULL) {
2572 QPRINTK(qdev, IFUP, ERR,
2573 "Large buffer queue allocation failed.\n");
2574 goto err_mem;
2575 }
2576 /*
2577 * Allocate large buffer queue control blocks.
2578 */
2579 rx_ring->lbq =
2580 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2581 GFP_KERNEL);
2582 if (rx_ring->lbq == NULL) {
2583 QPRINTK(qdev, IFUP, ERR,
2584 "Large buffer queue control block allocation failed.\n");
2585 goto err_mem;
2586 }
2587
Ron Mercer4545a3f2009-02-23 10:42:17 +00002588 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002589 }
2590
2591 return 0;
2592
2593err_mem:
2594 ql_free_rx_resources(qdev, rx_ring);
2595 return -ENOMEM;
2596}
2597
2598static void ql_tx_ring_clean(struct ql_adapter *qdev)
2599{
2600 struct tx_ring *tx_ring;
2601 struct tx_ring_desc *tx_ring_desc;
2602 int i, j;
2603
2604 /*
2605 * Loop through all queues and free
2606 * any resources.
2607 */
2608 for (j = 0; j < qdev->tx_ring_count; j++) {
2609 tx_ring = &qdev->tx_ring[j];
2610 for (i = 0; i < tx_ring->wq_len; i++) {
2611 tx_ring_desc = &tx_ring->q[i];
2612 if (tx_ring_desc && tx_ring_desc->skb) {
2613 QPRINTK(qdev, IFDOWN, ERR,
2614 "Freeing lost SKB %p, from queue %d, index %d.\n",
2615 tx_ring_desc->skb, j,
2616 tx_ring_desc->index);
2617 ql_unmap_send(qdev, tx_ring_desc,
2618 tx_ring_desc->map_cnt);
2619 dev_kfree_skb(tx_ring_desc->skb);
2620 tx_ring_desc->skb = NULL;
2621 }
2622 }
2623 }
2624}
2625
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002626static void ql_free_mem_resources(struct ql_adapter *qdev)
2627{
2628 int i;
2629
2630 for (i = 0; i < qdev->tx_ring_count; i++)
2631 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2632 for (i = 0; i < qdev->rx_ring_count; i++)
2633 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2634 ql_free_shadow_space(qdev);
2635}
2636
2637static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2638{
2639 int i;
2640
2641 /* Allocate space for our shadow registers and such. */
2642 if (ql_alloc_shadow_space(qdev))
2643 return -ENOMEM;
2644
2645 for (i = 0; i < qdev->rx_ring_count; i++) {
2646 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2647 QPRINTK(qdev, IFUP, ERR,
2648 "RX resource allocation failed.\n");
2649 goto err_mem;
2650 }
2651 }
2652 /* Allocate tx queue resources */
2653 for (i = 0; i < qdev->tx_ring_count; i++) {
2654 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2655 QPRINTK(qdev, IFUP, ERR,
2656 "TX resource allocation failed.\n");
2657 goto err_mem;
2658 }
2659 }
2660 return 0;
2661
2662err_mem:
2663 ql_free_mem_resources(qdev);
2664 return -ENOMEM;
2665}
2666
2667/* Set up the rx ring control block and pass it to the chip.
2668 * The control block is defined as
2669 * "Completion Queue Initialization Control Block", or cqicb.
2670 */
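/* Editorial sketch of the per-ring shadow area set up below (layout read
 * from this function, not from a datasheet): the first u64 holds the
 * producer index the chip writes back, followed by the large-buffer and
 * small-buffer indirect address lists that the cqicb is pointed at.
 */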
2671static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2672{
2673 struct cqicb *cqicb = &rx_ring->cqicb;
2674 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002675 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002676 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002677 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002678 void __iomem *doorbell_area =
2679 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2680 int err = 0;
2681 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002682 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002683 __le64 *base_indirect_ptr;
2684 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002685
2686 /* Set up the shadow registers for this ring. */
2687 rx_ring->prod_idx_sh_reg = shadow_reg;
2688 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00002689 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002690 shadow_reg += sizeof(u64);
2691 shadow_reg_dma += sizeof(u64);
2692 rx_ring->lbq_base_indirect = shadow_reg;
2693 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002694 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2695 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002696 rx_ring->sbq_base_indirect = shadow_reg;
2697 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2698
2699 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002700 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002701 rx_ring->cnsmr_idx = 0;
2702 rx_ring->curr_entry = rx_ring->cq_base;
2703
2704 /* PCI doorbell mem area + 0x04 for valid register */
2705 rx_ring->valid_db_reg = doorbell_area + 0x04;
2706
2707 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002708 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002709
2710 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002711 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002712
2713 memset((void *)cqicb, 0, sizeof(struct cqicb));
2714 cqicb->msix_vect = rx_ring->irq;
2715
Ron Mercer459caf52009-01-04 17:08:11 -08002716 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2717 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002718
Ron Mercer97345522009-01-09 11:31:50 +00002719 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002720
Ron Mercer97345522009-01-09 11:31:50 +00002721 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002722
2723 /*
2724 * Set up the control block load flags.
2725 */
2726 cqicb->flags = FLAGS_LC | /* Load queue base address */
2727 FLAGS_LV | /* Load MSI-X vector */
2728 FLAGS_LI; /* Load irq delay values */
2729 if (rx_ring->lbq_len) {
2730 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002731 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002732 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2733 page_entries = 0;
2734 do {
2735 *base_indirect_ptr = cpu_to_le64(tmp);
2736 tmp += DB_PAGE_SIZE;
2737 base_indirect_ptr++;
2738 page_entries++;
2739 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002740 cqicb->lbq_addr =
2741 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002742 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2743 (u16) rx_ring->lbq_buf_size;
2744 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2745 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2746 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002747 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002748 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002749 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002750 rx_ring->lbq_clean_idx = 0;
2751 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002752 }
2753 if (rx_ring->sbq_len) {
2754 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002755 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002756 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2757 page_entries = 0;
2758 do {
2759 *base_indirect_ptr = cpu_to_le64(tmp);
2760 tmp += DB_PAGE_SIZE;
2761 base_indirect_ptr++;
2762 page_entries++;
2763 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002764 cqicb->sbq_addr =
2765 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002766 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00002767 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08002768 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2769 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002770 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002771 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002772 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002773 rx_ring->sbq_clean_idx = 0;
2774 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002775 }
2776 switch (rx_ring->type) {
2777 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002778 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2779 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2780 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002781 case RX_Q:
2782 /* Inbound completion handling rx_rings run in
2783 * separate NAPI contexts.
2784 */
2785 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2786 64);
2787 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2788 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2789 break;
2790 default:
2791 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2792 rx_ring->type);
2793 }
Ron Mercer49740972009-02-26 10:08:36 +00002794 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002795 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2796 CFG_LCQ, rx_ring->cq_id);
2797 if (err) {
2798 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2799 return err;
2800 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002801 return err;
2802}
2803
2804static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2805{
2806 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2807 void __iomem *doorbell_area =
2808 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2809 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2810 (tx_ring->wq_id * sizeof(u64));
2811 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2812 (tx_ring->wq_id * sizeof(u64));
2813 int err = 0;
2814
2815 /*
2816 * Assign doorbell registers for this tx_ring.
2817 */
2818 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002819 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002820 tx_ring->prod_idx = 0;
2821 /* TX PCI doorbell mem area + 0x04 */
2822 tx_ring->valid_db_reg = doorbell_area + 0x04;
2823
2824 /*
2825 * Assign shadow registers for this tx_ring.
2826 */
2827 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2828 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2829
2830 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2831 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2832 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2833 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2834 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002835 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002836
Ron Mercer97345522009-01-09 11:31:50 +00002837 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002838
2839 ql_init_tx_ring(qdev, tx_ring);
2840
Ron Mercere3324712009-07-02 06:06:13 +00002841 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002842 (u16) tx_ring->wq_id);
2843 if (err) {
2844 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2845 return err;
2846 }
Ron Mercer49740972009-02-26 10:08:36 +00002847 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002848 return err;
2849}
2850
2851static void ql_disable_msix(struct ql_adapter *qdev)
2852{
2853 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2854 pci_disable_msix(qdev->pdev);
2855 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2856 kfree(qdev->msi_x_entry);
2857 qdev->msi_x_entry = NULL;
2858 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2859 pci_disable_msi(qdev->pdev);
2860 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2861 }
2862}
2863
Ron Mercera4ab6132009-08-27 11:02:10 +00002864/* We start by trying to get the number of vectors
2865 * stored in qdev->intr_count. If we don't get that
2866 * many then we reduce the count and try again.
2867 */
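/* Editorial note on the retry loop below: with the pci_enable_msix() API of
 * this era, a positive return value means "only this many vectors are
 * available", so the loop shrinks qdev->intr_count to that value and asks
 * again until the call returns 0 (success) or a negative error.
 */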
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002868static void ql_enable_msix(struct ql_adapter *qdev)
2869{
Ron Mercera4ab6132009-08-27 11:02:10 +00002870 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002871
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002872 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00002873 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002874 /* Try to alloc space for the msix struct,
2875 * if it fails then go to MSI/legacy.
2876 */
Ron Mercera4ab6132009-08-27 11:02:10 +00002877 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002878 sizeof(struct msix_entry),
2879 GFP_KERNEL);
2880 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00002881 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002882 goto msi;
2883 }
2884
Ron Mercera4ab6132009-08-27 11:02:10 +00002885 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002886 qdev->msi_x_entry[i].entry = i;
2887
Ron Mercera4ab6132009-08-27 11:02:10 +00002888 /* Loop to get our vectors. We start with
2889 * what we want and settle for what we get.
2890 */
2891 do {
2892 err = pci_enable_msix(qdev->pdev,
2893 qdev->msi_x_entry, qdev->intr_count);
2894 if (err > 0)
2895 qdev->intr_count = err;
2896 } while (err > 0);
2897
2898 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002899 kfree(qdev->msi_x_entry);
2900 qdev->msi_x_entry = NULL;
2901 QPRINTK(qdev, IFUP, WARNING,
2902 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00002903 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00002904 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00002905 } else if (err == 0) {
2906 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2907 QPRINTK(qdev, IFUP, INFO,
2908 "MSI-X Enabled, got %d vectors.\n",
2909 qdev->intr_count);
2910 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002911 }
2912 }
2913msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00002914 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00002915 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002916 if (!pci_enable_msi(qdev->pdev)) {
2917 set_bit(QL_MSI_ENABLED, &qdev->flags);
2918 QPRINTK(qdev, IFUP, INFO,
2919 "Running with MSI interrupts.\n");
2920 return;
2921 }
2922 }
Ron Mercera5a62a12009-11-11 12:54:05 +00002923 qlge_irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2925}
2926
Ron Mercer39aa8162009-08-27 11:02:11 +00002927/* Each vector services 1 RSS ring and 1 or more
2928 * TX completion rings. This function loops through
2929 * the TX completion rings and assigns the vector that
2930 * will service it. An example would be if there are
2931 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2932 * This would mean that vector 0 would service RSS ring 0
 2933 * and TX completion rings 0,1,2 and 3. Vector 1 would
2934 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2935 */
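/* Worked example (illustrative values only): with intr_count = 2 and
 * tx_ring_count = 8, tx_rings_per_vector is 4. TX completion rings start
 * at rx_ring[rss_ring_count], so the loop below hands the first four of
 * them irq 0 and the next four irq 1, matching the split described above.
 */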
2936static void ql_set_tx_vect(struct ql_adapter *qdev)
2937{
2938 int i, j, vect;
2939 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2940
2941 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2942 /* Assign irq vectors to TX rx_rings.*/
2943 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2944 i < qdev->rx_ring_count; i++) {
2945 if (j == tx_rings_per_vector) {
2946 vect++;
2947 j = 0;
2948 }
2949 qdev->rx_ring[i].irq = vect;
2950 j++;
2951 }
2952 } else {
2953 /* For single vector all rings have an irq
2954 * of zero.
2955 */
2956 for (i = 0; i < qdev->rx_ring_count; i++)
2957 qdev->rx_ring[i].irq = 0;
2958 }
2959}
2960
2961/* Set the interrupt mask for this vector. Each vector
2962 * will service 1 RSS ring and 1 or more TX completion
2963 * rings. This function sets up a bit mask per vector
2964 * that indicates which rings it services.
2965 */
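/* Continuing the example above (illustrative): vector 0's irq_mask ends up
 * with 1 << cq_id set for its RSS ring plus each TX completion ring it owns.
 * qlge_isr() tests this mask against ISR1, and ql_napi_poll_msix() tests it
 * against a ring's cq_id bit, to decide what to service.
 */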
2966static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2967{
2968 int j, vect = ctx->intr;
2969 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2970
2971 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2972 /* Add the RSS ring serviced by this vector
2973 * to the mask.
2974 */
2975 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2976 /* Add the TX ring(s) serviced by this vector
2977 * to the mask. */
2978 for (j = 0; j < tx_rings_per_vector; j++) {
2979 ctx->irq_mask |=
2980 (1 << qdev->rx_ring[qdev->rss_ring_count +
2981 (vect * tx_rings_per_vector) + j].cq_id);
2982 }
2983 } else {
2984 /* For single vector we just shift each queue's
2985 * ID into the mask.
2986 */
2987 for (j = 0; j < qdev->rx_ring_count; j++)
2988 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2989 }
2990}
2991
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002992/*
2993 * Here we build the intr_context structures based on
2994 * our rx_ring count and intr vector count.
2995 * The intr_context structure is used to hook each vector
2996 * to possibly different handlers.
2997 */
2998static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2999{
3000 int i = 0;
3001 struct intr_context *intr_context = &qdev->intr_context[0];
3002
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003003 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3004 /* Each rx_ring has its
3005 * own intr_context since we have separate
3006 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003007 */
3008 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3009 qdev->rx_ring[i].irq = i;
3010 intr_context->intr = i;
3011 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003012 /* Set up this vector's bit-mask that indicates
3013 * which queues it services.
3014 */
3015 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003016 /*
 3017 * We set up each vector's enable/disable/read bits so
 3018 * there are no bit/mask calculations in the critical path.
3019 */
3020 intr_context->intr_en_mask =
3021 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3022 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3023 | i;
3024 intr_context->intr_dis_mask =
3025 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3026 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3027 INTR_EN_IHD | i;
3028 intr_context->intr_read_mask =
3029 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3030 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3031 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003032 if (i == 0) {
3033 /* The first vector/queue handles
3034 * broadcast/multicast, fatal errors,
3035 * and firmware events. This in addition
3036 * to normal inbound NAPI processing.
3037 */
3038 intr_context->handler = qlge_isr;
3039 sprintf(intr_context->name, "%s-rx-%d",
3040 qdev->ndev->name, i);
3041 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003042 /*
3043 * Inbound queues handle unicast frames only.
3044 */
3045 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003046 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003047 qdev->ndev->name, i);
3048 }
3049 }
3050 } else {
3051 /*
3052 * All rx_rings use the same intr_context since
3053 * there is only one vector.
3054 */
3055 intr_context->intr = 0;
3056 intr_context->qdev = qdev;
3057 /*
 3058 * We set up each vector's enable/disable/read bits so
 3059 * there are no bit/mask calculations in the critical path.
3060 */
3061 intr_context->intr_en_mask =
3062 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3063 intr_context->intr_dis_mask =
3064 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3065 INTR_EN_TYPE_DISABLE;
3066 intr_context->intr_read_mask =
3067 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3068 /*
3069 * Single interrupt means one handler for all rings.
3070 */
3071 intr_context->handler = qlge_isr;
3072 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003073 /* Set up this vector's bit-mask that indicates
3074 * which queues it services. In this case there is
3075 * a single vector so it will service all RSS and
3076 * TX completion rings.
3077 */
3078 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003079 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003080 /* Tell the TX completion rings which MSIx vector
3081 * they will be using.
3082 */
3083 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003084}
3085
3086static void ql_free_irq(struct ql_adapter *qdev)
3087{
3088 int i;
3089 struct intr_context *intr_context = &qdev->intr_context[0];
3090
3091 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3092 if (intr_context->hooked) {
3093 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3094 free_irq(qdev->msi_x_entry[i].vector,
3095 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00003096 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003097 "freeing msix interrupt %d.\n", i);
3098 } else {
3099 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00003100 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003101 "freeing msi interrupt %d.\n", i);
3102 }
3103 }
3104 }
3105 ql_disable_msix(qdev);
3106}
3107
3108static int ql_request_irq(struct ql_adapter *qdev)
3109{
3110 int i;
3111 int status = 0;
3112 struct pci_dev *pdev = qdev->pdev;
3113 struct intr_context *intr_context = &qdev->intr_context[0];
3114
3115 ql_resolve_queues_to_irqs(qdev);
3116
3117 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3118 atomic_set(&intr_context->irq_cnt, 0);
3119 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3120 status = request_irq(qdev->msi_x_entry[i].vector,
3121 intr_context->handler,
3122 0,
3123 intr_context->name,
3124 &qdev->rx_ring[i]);
3125 if (status) {
3126 QPRINTK(qdev, IFUP, ERR,
3127 "Failed request for MSIX interrupt %d.\n",
3128 i);
3129 goto err_irq;
3130 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003131 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003132 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3133 i,
3134 qdev->rx_ring[i].type ==
3135 DEFAULT_Q ? "DEFAULT_Q" : "",
3136 qdev->rx_ring[i].type ==
3137 TX_Q ? "TX_Q" : "",
3138 qdev->rx_ring[i].type ==
3139 RX_Q ? "RX_Q" : "", intr_context->name);
3140 }
3141 } else {
3142 QPRINTK(qdev, IFUP, DEBUG,
3143 "trying msi or legacy interrupts.\n");
3144 QPRINTK(qdev, IFUP, DEBUG,
3145 "%s: irq = %d.\n", __func__, pdev->irq);
3146 QPRINTK(qdev, IFUP, DEBUG,
3147 "%s: context->name = %s.\n", __func__,
3148 intr_context->name);
3149 QPRINTK(qdev, IFUP, DEBUG,
3150 "%s: dev_id = 0x%p.\n", __func__,
3151 &qdev->rx_ring[0]);
3152			status =
3153			    request_irq(pdev->irq, qlge_isr,
3154					test_bit(QL_MSI_ENABLED,
3155						 &qdev->flags) ? 0 : IRQF_SHARED,
3156					intr_context->name,
3157					&qdev->rx_ring[0]);
3158 if (status)
3159 goto err_irq;
3160
3161 QPRINTK(qdev, IFUP, ERR,
3162 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3163 i,
3164 qdev->rx_ring[0].type ==
3165 DEFAULT_Q ? "DEFAULT_Q" : "",
3166 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3167 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3168 intr_context->name);
3169 }
3170 intr_context->hooked = 1;
3171 }
3172 return status;
3173err_irq:
3174	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3175 ql_free_irq(qdev);
3176 return status;
3177}
3178
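/* Build and download the RSS Indirection Control Block (RICB): the hash
 * keys, a 1024-entry indirection table that spreads completions across
 * the RSS rings (entry i maps to ring i & (rss_ring_count - 1)), and the
 * hash-type flags. Called only when more than one RSS ring is in use.
 */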
3179static int ql_start_rss(struct ql_adapter *qdev)
3180{
Ron Mercer541ae282009-10-08 09:54:37 +00003181 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3182 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3183 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3184 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3185 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3186 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003187 struct ricb *ricb = &qdev->ricb;
3188 int status = 0;
3189 int i;
3190 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3191
Ron Mercere3324712009-07-02 06:06:13 +00003192 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003193
Ron Mercerb2014ff2009-08-27 11:02:09 +00003194 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003195 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003196 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3197 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198
3199 /*
3200 * Fill out the Indirection Table.
3201 */
Ron Mercer541ae282009-10-08 09:54:37 +00003202 for (i = 0; i < 1024; i++)
3203 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003204
Ron Mercer541ae282009-10-08 09:54:37 +00003205 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3206 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003207
Ron Mercer49740972009-02-26 10:08:36 +00003208 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003209
Ron Mercere3324712009-07-02 06:06:13 +00003210 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003211 if (status) {
3212 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3213 return status;
3214 }
Ron Mercer49740972009-02-26 10:08:36 +00003215 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003216 return status;
3217}
3218
Ron Mercera5f59dc2009-07-02 06:06:07 +00003219static int ql_clear_routing_entries(struct ql_adapter *qdev)
3220{
3221 int i, status = 0;
3222
3223 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3224 if (status)
3225 return status;
3226 /* Clear all the entries in the routing table. */
3227 for (i = 0; i < 16; i++) {
3228 status = ql_set_routing_reg(qdev, i, 0, 0);
3229 if (status) {
3230 QPRINTK(qdev, IFUP, ERR,
3231 "Failed to init routing register for CAM "
3232 "packets.\n");
3233 break;
3234 }
3235 }
3236 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3237 return status;
3238}
3239
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003240/* Initialize the frame-to-queue routing. */
3241static int ql_route_initialize(struct ql_adapter *qdev)
3242{
3243 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003244
3245 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003246 status = ql_clear_routing_entries(qdev);
3247 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003248 return status;
3249
3250 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3251 if (status)
3252 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003253
3254 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3255 if (status) {
3256 QPRINTK(qdev, IFUP, ERR,
3257 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003258 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003259 }
3260 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3261 if (status) {
3262 QPRINTK(qdev, IFUP, ERR,
3263 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003264 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003265 }
3266 /* If we have more than one inbound queue, then turn on RSS in the
3267 * routing block.
3268 */
3269 if (qdev->rss_ring_count > 1) {
3270 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3271 RT_IDX_RSS_MATCH, 1);
3272 if (status) {
3273 QPRINTK(qdev, IFUP, ERR,
3274 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003275 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003276 }
3277 }
3278
3279 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3280 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003281 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003282 QPRINTK(qdev, IFUP, ERR,
3283 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003284exit:
3285 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003286 return status;
3287}
3288
Ron Mercer2ee1e272009-03-03 12:10:33 +00003289int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003290{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003291 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003292
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003293	/* Check if the link is up and use that to
3294 * determine if we are setting or clearing
3295 * the MAC address in the CAM.
3296 */
3297 set = ql_read32(qdev, STS);
3298 set &= qdev->port_link_up;
3299 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003300 if (status) {
3301 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3302 return status;
3303 }
3304
3305 status = ql_route_initialize(qdev);
3306 if (status)
3307 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3308
3309 return status;
3310}
3311
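/* Bring the hardware to an operational state: program the global control
 * registers, start every rx and tx ring, download the RSS block when more
 * than one inbound queue exists, initialize the port, load the CAM and
 * routing tables, and enable NAPI on the RSS rings.
 */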
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003312static int ql_adapter_initialize(struct ql_adapter *qdev)
3313{
3314 u32 value, mask;
3315 int i;
3316 int status = 0;
3317
3318 /*
3319 * Set up the System register to halt on errors.
3320 */
3321 value = SYS_EFE | SYS_FAE;
3322 mask = value << 16;
3323 ql_write32(qdev, SYS, mask | value);
3324
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003325 /* Set the default queue, and VLAN behavior. */
3326 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3327 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003328 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3329
3330 /* Set the MPI interrupt to enabled. */
3331 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3332
3333 /* Enable the function, set pagesize, enable error checking. */
3334 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3335 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3336
3337 /* Set/clear header splitting. */
3338 mask = FSC_VM_PAGESIZE_MASK |
3339 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3340 ql_write32(qdev, FSC, mask | value);
3341
3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
Ron Mercer52e55f32009-10-10 09:35:07 +00003343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003344
Ron Mercera3b71932009-10-08 09:54:38 +00003345 /* Set RX packet routing to use port/pci function on which the
3346	 * packet arrived, in addition to the usual frame routing.
3347 * This is helpful on bonding where both interfaces can have
3348 * the same MAC address.
3349 */
3350 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003351 /* Reroute all packets to our Interface.
3352 * They may have been routed to MPI firmware
3353 * due to WOL.
3354 */
3355 value = ql_read32(qdev, MGMT_RCV_CFG);
3356 value &= ~MGMT_RCV_CFG_RM;
3357 mask = 0xffff0000;
3358
3359 /* Sticky reg needs clearing due to WOL. */
3360 ql_write32(qdev, MGMT_RCV_CFG, mask);
3361 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3362
3363	/* Default WOL is enabled on Mezz cards */
3364 if (qdev->pdev->subsystem_device == 0x0068 ||
3365 qdev->pdev->subsystem_device == 0x0180)
3366 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003367
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003368 /* Start up the rx queues. */
3369 for (i = 0; i < qdev->rx_ring_count; i++) {
3370 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3371 if (status) {
3372 QPRINTK(qdev, IFUP, ERR,
3373 "Failed to start rx ring[%d].\n", i);
3374 return status;
3375 }
3376 }
3377
3378 /* If there is more than one inbound completion queue
3379 * then download a RICB to configure RSS.
3380 */
3381 if (qdev->rss_ring_count > 1) {
3382 status = ql_start_rss(qdev);
3383 if (status) {
3384 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3385 return status;
3386 }
3387 }
3388
3389 /* Start up the tx queues. */
3390 for (i = 0; i < qdev->tx_ring_count; i++) {
3391 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3392 if (status) {
3393 QPRINTK(qdev, IFUP, ERR,
3394 "Failed to start tx ring[%d].\n", i);
3395 return status;
3396 }
3397 }
3398
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003399 /* Initialize the port and set the max framesize. */
3400 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003401 if (status)
3402 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003403
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003404 /* Set up the MAC address and frame routing filter. */
3405 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003406 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003407 QPRINTK(qdev, IFUP, ERR,
3408 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003409 return status;
3410 }
3411
3412 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003413 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003414 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003415 i);
3416 napi_enable(&qdev->rx_ring[i].napi);
3417 }
3418
3419 return status;
3420}
3421
3422/* Issue soft reset to chip. */
3423static int ql_adapter_reset(struct ql_adapter *qdev)
3424{
3425 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003426 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003427 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003428
Ron Mercera5f59dc2009-07-02 06:06:07 +00003429 /* Clear all the entries in the routing table. */
3430 status = ql_clear_routing_entries(qdev);
3431 if (status) {
3432 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3433 return status;
3434 }
3435
3436 end_jiffies = jiffies +
3437 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003438
3439 /* Stop management traffic. */
3440 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3441
3442 /* Wait for the NIC and MGMNT FIFOs to empty. */
3443 ql_wait_fifo_empty(qdev);
3444
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003445 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003446
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003447 do {
3448 value = ql_read32(qdev, RST_FO);
3449 if ((value & RST_FO_FR) == 0)
3450 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003451 cpu_relax();
3452 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003453
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003454 if (value & RST_FO_FR) {
3455 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003456 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003457 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003458 }
3459
Ron Mercer84087f42009-10-08 09:54:41 +00003460 /* Resume management traffic. */
3461 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003462 return status;
3463}
3464
3465static void ql_display_dev_info(struct net_device *ndev)
3466{
3467 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3468
3469 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003470 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003471 "XG Roll = %d, XG Rev = %d.\n",
3472 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003473 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003474 qdev->chip_rev_id & 0x0000000f,
3475 qdev->chip_rev_id >> 4 & 0x0000000f,
3476 qdev->chip_rev_id >> 8 & 0x0000000f,
3477 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003478 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003479}
3480
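/* Program the MPI firmware for the Wake-on-LAN mode selected through
 * ethtool. Only magic-packet wake-up is supported; any other WOL flag
 * is rejected with -EINVAL.
 */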
Ron Mercerbc083ce2009-10-21 11:07:40 +00003481int ql_wol(struct ql_adapter *qdev)
3482{
3483 int status = 0;
3484 u32 wol = MB_WOL_DISABLE;
3485
3486 /* The CAM is still intact after a reset, but if we
3487 * are doing WOL, then we may need to program the
3488 * routing regs. We would also need to issue the mailbox
3489 * commands to instruct the MPI what to do per the ethtool
3490 * settings.
3491 */
3492
3493 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3494 WAKE_MCAST | WAKE_BCAST)) {
3495 QPRINTK(qdev, IFDOWN, ERR,
3496 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3497 qdev->wol);
3498 return -EINVAL;
3499 }
3500
3501 if (qdev->wol & WAKE_MAGIC) {
3502 status = ql_mb_wol_set_magic(qdev, 1);
3503 if (status) {
3504 QPRINTK(qdev, IFDOWN, ERR,
3505 "Failed to set magic packet on %s.\n",
3506 qdev->ndev->name);
3507 return status;
3508 } else
3509 QPRINTK(qdev, DRV, INFO,
3510 "Enabled magic packet successfully on %s.\n",
3511 qdev->ndev->name);
3512
3513 wol |= MB_WOL_MAGIC_PKT;
3514 }
3515
3516 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003517 wol |= MB_WOL_MODE_ON;
3518 status = ql_mb_wol_mode(qdev, wol);
3519 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3520			(status == 0) ? "Successfully set" : "Failed", wol,
3521 qdev->ndev->name);
3522 }
3523
3524 return status;
3525}
3526
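/* Quiesce the adapter: drop the link, cancel the worker threads, disable
 * NAPI and interrupts, clean the tx rings, free the rx buffers and
 * finally soft-reset the chip.
 */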
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003527static int ql_adapter_down(struct ql_adapter *qdev)
3528{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003529 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003530
Ron Mercer6a473302009-07-02 06:06:12 +00003531 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003532
Ron Mercer6497b602009-02-12 16:37:13 -08003533 /* Don't kill the reset worker thread if we
3534 * are in the process of recovery.
3535 */
3536 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3537 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003538 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3539 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003540 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003541 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003542
Ron Mercer39aa8162009-08-27 11:02:11 +00003543 for (i = 0; i < qdev->rss_ring_count; i++)
3544 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003545
3546 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3547
3548 ql_disable_interrupts(qdev);
3549
3550 ql_tx_ring_clean(qdev);
3551
Ron Mercer6b318cb2009-03-09 10:59:26 +00003552	/* Call netif_napi_del() from a common point.
3553 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003554 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003555 netif_napi_del(&qdev->rx_ring[i].napi);
3556
Ron Mercer4545a3f2009-02-23 10:42:17 +00003557 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003558
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559 status = ql_adapter_reset(qdev);
3560 if (status)
3561 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3562 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 return status;
3564}
3565
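/* Counterpart to ql_adapter_down(): initialize the hardware, repost rx
 * buffers, restore the carrier if the link is already up, and re-enable
 * interrupts and the tx queues.
 */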
3566static int ql_adapter_up(struct ql_adapter *qdev)
3567{
3568 int err = 0;
3569
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003570 err = ql_adapter_initialize(qdev);
3571 if (err) {
3572 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003573 goto err_init;
3574 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003575 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003576 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003577 /* If the port is initialized and the
3578	 * link is up then turn on the carrier.
3579 */
3580 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3581 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003582 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003583 ql_enable_interrupts(qdev);
3584 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003585 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586
3587 return 0;
3588err_init:
3589 ql_adapter_reset(qdev);
3590 return err;
3591}
3592
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003593static void ql_release_adapter_resources(struct ql_adapter *qdev)
3594{
3595 ql_free_mem_resources(qdev);
3596 ql_free_irq(qdev);
3597}
3598
3599static int ql_get_adapter_resources(struct ql_adapter *qdev)
3600{
3601 int status = 0;
3602
3603 if (ql_alloc_mem_resources(qdev)) {
3604 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3605 return -ENOMEM;
3606 }
3607 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003608 return status;
3609}
3610
3611static int qlge_close(struct net_device *ndev)
3612{
3613 struct ql_adapter *qdev = netdev_priv(ndev);
3614
3615 /*
3616 * Wait for device to recover from a reset.
3617 * (Rarely happens, but possible.)
3618 */
3619 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3620 msleep(1);
3621 ql_adapter_down(qdev);
3622 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003623 return 0;
3624}
3625
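/* Size the ring layout before any allocation: one RSS (inbound) ring per
 * MSI-X vector (bounded by MAX_CPUS and the online CPU count), one tx
 * ring per CPU, and one outbound completion ring per tx ring. For
 * example, four CPUs with four MSI-X vectors yields 4 RSS rings, 4 tx
 * rings and 8 rx_ring structures in total.
 */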
3626static int ql_configure_rings(struct ql_adapter *qdev)
3627{
3628 int i;
3629 struct rx_ring *rx_ring;
3630 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003631 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003632 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3633 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3634
3635 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003636
Ron Mercera4ab6132009-08-27 11:02:10 +00003637 /* In a perfect world we have one RSS ring for each CPU
3638	 * and each has its own vector. To do that we ask for
3639 * cpu_cnt vectors. ql_enable_msix() will adjust the
3640 * vector count to what we actually get. We then
3641 * allocate an RSS ring for each.
3642 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003643 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003644 qdev->intr_count = cpu_cnt;
3645 ql_enable_msix(qdev);
3646 /* Adjust the RSS ring count to the actual vector count. */
3647 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003649 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003650
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003651 for (i = 0; i < qdev->tx_ring_count; i++) {
3652 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003653 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003654 tx_ring->qdev = qdev;
3655 tx_ring->wq_id = i;
3656 tx_ring->wq_len = qdev->tx_ring_size;
3657 tx_ring->wq_size =
3658 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3659
3660 /*
3661		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003662 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003663 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003664 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003665 }
3666
3667 for (i = 0; i < qdev->rx_ring_count; i++) {
3668 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003669 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003670 rx_ring->qdev = qdev;
3671 rx_ring->cq_id = i;
3672 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003673 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003674 /*
3675 * Inbound (RSS) queues.
3676 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003677 rx_ring->cq_len = qdev->rx_ring_size;
3678 rx_ring->cq_size =
3679 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3680 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3681 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003682 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00003683 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3684 QPRINTK(qdev, IFUP, DEBUG,
3685 "lbq_buf_size %d, order = %d\n",
3686 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003687 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3688 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003689 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00003690 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003691 rx_ring->type = RX_Q;
3692 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003693 /*
3694 * Outbound queue handles outbound completions only.
3695 */
3696 /* outbound cq is same size as tx_ring it services. */
3697 rx_ring->cq_len = qdev->tx_ring_size;
3698 rx_ring->cq_size =
3699 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3700 rx_ring->lbq_len = 0;
3701 rx_ring->lbq_size = 0;
3702 rx_ring->lbq_buf_size = 0;
3703 rx_ring->sbq_len = 0;
3704 rx_ring->sbq_size = 0;
3705 rx_ring->sbq_buf_size = 0;
3706 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003707 }
3708 }
3709 return 0;
3710}
3711
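/* net_device open entry point: reset the chip, work out the ring layout,
 * claim memory and IRQ resources, then bring the adapter up.
 */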
3712static int qlge_open(struct net_device *ndev)
3713{
3714 int err = 0;
3715 struct ql_adapter *qdev = netdev_priv(ndev);
3716
Ron Mercer74e12432009-11-11 12:54:04 +00003717 err = ql_adapter_reset(qdev);
3718 if (err)
3719 return err;
3720
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003721 err = ql_configure_rings(qdev);
3722 if (err)
3723 return err;
3724
3725 err = ql_get_adapter_resources(qdev);
3726 if (err)
3727 goto error_up;
3728
3729 err = ql_adapter_up(qdev);
3730 if (err)
3731 goto error_up;
3732
3733 return err;
3734
3735error_up:
3736 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003737 return err;
3738}
3739
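/* Rebuild the large receive buffers for a new MTU by cycling the adapter
 * down and back up with the recalculated lbq_buf_size. Waits briefly for
 * any reset already in flight to finish first.
 */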
Ron Mercer7c734352009-10-19 03:32:19 +00003740static int ql_change_rx_buffers(struct ql_adapter *qdev)
3741{
3742 struct rx_ring *rx_ring;
3743 int i, status;
3744 u32 lbq_buf_len;
3745
3746	/* Wait for an outstanding reset to complete. */
3747 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3748 int i = 3;
3749 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3750 QPRINTK(qdev, IFUP, ERR,
3751 "Waiting for adapter UP...\n");
3752 ssleep(1);
3753 }
3754
3755 if (!i) {
3756 QPRINTK(qdev, IFUP, ERR,
3757 "Timed out waiting for adapter UP\n");
3758 return -ETIMEDOUT;
3759 }
3760 }
3761
3762 status = ql_adapter_down(qdev);
3763 if (status)
3764 goto error;
3765
3766 /* Get the new rx buffer size. */
3767 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3768 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3769 qdev->lbq_buf_order = get_order(lbq_buf_len);
3770
3771 for (i = 0; i < qdev->rss_ring_count; i++) {
3772 rx_ring = &qdev->rx_ring[i];
3773 /* Set the new size. */
3774 rx_ring->lbq_buf_size = lbq_buf_len;
3775 }
3776
3777 status = ql_adapter_up(qdev);
3778 if (status)
3779 goto error;
3780
3781 return status;
3782error:
3783 QPRINTK(qdev, IFUP, ALERT,
3784 "Driver up/down cycle failed, closing device.\n");
3785 set_bit(QL_ADAPTER_UP, &qdev->flags);
3786 dev_close(qdev->ndev);
3787 return status;
3788}
3789
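/* Only the standard (1500) and jumbo (9000) MTUs are supported. If the
 * interface is running, the receive buffers are resized for the new MTU.
 */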
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003790static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3791{
3792 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00003793 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003794
3795 if (ndev->mtu == 1500 && new_mtu == 9000) {
3796 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3797 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3798 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3799 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3800 (ndev->mtu == 9000 && new_mtu == 9000)) {
3801 return 0;
3802 } else
3803 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00003804
3805 queue_delayed_work(qdev->workqueue,
3806 &qdev->mpi_port_cfg_work, 3*HZ);
3807
3808 if (!netif_running(qdev->ndev)) {
3809 ndev->mtu = new_mtu;
3810 return 0;
3811 }
3812
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813 ndev->mtu = new_mtu;
Ron Mercer7c734352009-10-19 03:32:19 +00003814 status = ql_change_rx_buffers(qdev);
3815 if (status) {
3816 QPRINTK(qdev, IFUP, ERR,
3817 "Changing MTU failed.\n");
3818 }
3819
3820 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003821}
3822
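/* Sum the per-ring software counters into the net_device stats block. */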
3823static struct net_device_stats *qlge_get_stats(struct net_device
3824 *ndev)
3825{
Ron Mercer885ee392009-11-03 13:49:31 +00003826 struct ql_adapter *qdev = netdev_priv(ndev);
3827 struct rx_ring *rx_ring = &qdev->rx_ring[0];
3828 struct tx_ring *tx_ring = &qdev->tx_ring[0];
3829 unsigned long pkts, mcast, dropped, errors, bytes;
3830 int i;
3831
3832 /* Get RX stats. */
3833 pkts = mcast = dropped = errors = bytes = 0;
3834 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
3835 pkts += rx_ring->rx_packets;
3836 bytes += rx_ring->rx_bytes;
3837 dropped += rx_ring->rx_dropped;
3838 errors += rx_ring->rx_errors;
3839 mcast += rx_ring->rx_multicast;
3840 }
3841 ndev->stats.rx_packets = pkts;
3842 ndev->stats.rx_bytes = bytes;
3843 ndev->stats.rx_dropped = dropped;
3844 ndev->stats.rx_errors = errors;
3845 ndev->stats.multicast = mcast;
3846
3847 /* Get TX stats. */
3848 pkts = errors = bytes = 0;
3849 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
3850 pkts += tx_ring->tx_packets;
3851 bytes += tx_ring->tx_bytes;
3852 errors += tx_ring->tx_errors;
3853 }
3854 ndev->stats.tx_packets = pkts;
3855 ndev->stats.tx_bytes = bytes;
3856 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00003857 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003858}
3859
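/* Update the routing registers and multicast CAM entries to reflect
 * promiscuous/all-multi transitions and the current multicast list.
 */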
3860static void qlge_set_multicast_list(struct net_device *ndev)
3861{
3862 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3863 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003864 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003865
Ron Mercercc288f52009-02-23 10:42:14 +00003866 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3867 if (status)
3868 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003869 /*
3870 * Set or clear promiscuous mode if a
3871 * transition is taking place.
3872 */
3873 if (ndev->flags & IFF_PROMISC) {
3874 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3875 if (ql_set_routing_reg
3876 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3877 QPRINTK(qdev, HW, ERR,
3878 "Failed to set promiscous mode.\n");
3879 } else {
3880 set_bit(QL_PROMISCUOUS, &qdev->flags);
3881 }
3882 }
3883 } else {
3884 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3885 if (ql_set_routing_reg
3886 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3887 QPRINTK(qdev, HW, ERR,
3888 "Failed to clear promiscous mode.\n");
3889 } else {
3890 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3891 }
3892 }
3893 }
3894
3895 /*
3896 * Set or clear all multicast mode if a
3897 * transition is taking place.
3898 */
3899 if ((ndev->flags & IFF_ALLMULTI) ||
3900 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3901 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3902 if (ql_set_routing_reg
3903 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3904 QPRINTK(qdev, HW, ERR,
3905 "Failed to set all-multi mode.\n");
3906 } else {
3907 set_bit(QL_ALLMULTI, &qdev->flags);
3908 }
3909 }
3910 } else {
3911 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3912 if (ql_set_routing_reg
3913 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3914 QPRINTK(qdev, HW, ERR,
3915 "Failed to clear all-multi mode.\n");
3916 } else {
3917 clear_bit(QL_ALLMULTI, &qdev->flags);
3918 }
3919 }
3920 }
3921
3922 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003923 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3924 if (status)
3925 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003926 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3927 i++, mc_ptr = mc_ptr->next)
3928 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3929 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3930 QPRINTK(qdev, HW, ERR,
3931 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003933 goto exit;
3934 }
Ron Mercercc288f52009-02-23 10:42:14 +00003935 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003936 if (ql_set_routing_reg
3937 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3938 QPRINTK(qdev, HW, ERR,
3939 "Failed to set multicast match mode.\n");
3940 } else {
3941 set_bit(QL_ALLMULTI, &qdev->flags);
3942 }
3943 }
3944exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00003945 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003946}
3947
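/* Program a new station MAC address into the CAM. */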
3948static int qlge_set_mac_address(struct net_device *ndev, void *p)
3949{
3950 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3951 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003952 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003953
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954 if (!is_valid_ether_addr(addr->sa_data))
3955 return -EADDRNOTAVAIL;
3956 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3957
Ron Mercercc288f52009-02-23 10:42:14 +00003958 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3959 if (status)
3960 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00003961 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3962 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00003963 if (status)
3964 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3965 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3966 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003967}
3968
3969static void qlge_tx_timeout(struct net_device *ndev)
3970{
3971 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08003972 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003973}
3974
3975static void ql_asic_reset_work(struct work_struct *work)
3976{
3977 struct ql_adapter *qdev =
3978 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00003979 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003980 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00003981 status = ql_adapter_down(qdev);
3982 if (status)
3983 goto error;
3984
3985 status = ql_adapter_up(qdev);
3986 if (status)
3987 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00003988
3989 /* Restore rx mode. */
3990 clear_bit(QL_ALLMULTI, &qdev->flags);
3991 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3992 qlge_set_multicast_list(qdev->ndev);
3993
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003994 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00003995 return;
3996error:
3997 QPRINTK(qdev, IFUP, ALERT,
3998 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003999
Ron Mercerdb988122009-03-09 10:59:17 +00004000 set_bit(QL_ADAPTER_UP, &qdev->flags);
4001 dev_close(qdev->ndev);
4002 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004003}
4004
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004005static struct nic_operations qla8012_nic_ops = {
4006 .get_flash = ql_get_8012_flash_params,
4007 .port_initialize = ql_8012_port_initialize,
4008};
4009
Ron Mercercdca8d02009-03-02 08:07:31 +00004010static struct nic_operations qla8000_nic_ops = {
4011 .get_flash = ql_get_8000_flash_params,
4012 .port_initialize = ql_8000_port_initialize,
4013};
4014
Ron Mercere4552f52009-06-09 05:39:32 +00004015/* Find the pcie function number for the other NIC
4016 * on this chip. Since both NIC functions share a
4017 * common firmware we have the lowest enabled function
4018 * do any common work. Examples would be resetting
4019 * after a fatal firmware error, or doing a firmware
4020 * coredump.
4021 */
4022static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004023{
Ron Mercere4552f52009-06-09 05:39:32 +00004024 int status = 0;
4025 u32 temp;
4026 u32 nic_func1, nic_func2;
4027
4028 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4029 &temp);
4030 if (status)
4031 return status;
4032
4033 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4034 MPI_TEST_NIC_FUNC_MASK);
4035 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4036 MPI_TEST_NIC_FUNC_MASK);
4037
4038 if (qdev->func == nic_func1)
4039 qdev->alt_func = nic_func2;
4040 else if (qdev->func == nic_func2)
4041 qdev->alt_func = nic_func1;
4042 else
4043 status = -EIO;
4044
4045 return status;
4046}
4047
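/* Read the PCI function number from the status register and derive the
 * port number, semaphore mask, mailbox addresses and nic_ops for this
 * device.
 */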
4048static int ql_get_board_info(struct ql_adapter *qdev)
4049{
4050 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004051 qdev->func =
4052 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004053 if (qdev->func > 3)
4054 return -EIO;
4055
4056 status = ql_get_alt_pcie_func(qdev);
4057 if (status)
4058 return status;
4059
4060 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4061 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004062 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4063 qdev->port_link_up = STS_PL1;
4064 qdev->port_init = STS_PI1;
4065 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4066 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4067 } else {
4068 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4069 qdev->port_link_up = STS_PL0;
4070 qdev->port_init = STS_PI0;
4071 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4072 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4073 }
4074 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004075 qdev->device_id = qdev->pdev->device;
4076 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4077 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004078 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4079 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004080 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004081}
4082
4083static void ql_release_all(struct pci_dev *pdev)
4084{
4085 struct net_device *ndev = pci_get_drvdata(pdev);
4086 struct ql_adapter *qdev = netdev_priv(ndev);
4087
4088 if (qdev->workqueue) {
4089 destroy_workqueue(qdev->workqueue);
4090 qdev->workqueue = NULL;
4091 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004092
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004093 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004094 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004095 if (qdev->doorbell_area)
4096 iounmap(qdev->doorbell_area);
4097 pci_release_regions(pdev);
4098 pci_set_drvdata(pdev, NULL);
4099}
4100
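/* One-time PCI and software setup: enable the device, map the register
 * and doorbell BARs, read the flash, and set the default ring, coalescing
 * and worker-thread parameters.
 */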
4101static int __devinit ql_init_device(struct pci_dev *pdev,
4102 struct net_device *ndev, int cards_found)
4103{
4104 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004105 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004106
Ron Mercere3324712009-07-02 06:06:13 +00004107 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004108 err = pci_enable_device(pdev);
4109 if (err) {
4110 dev_err(&pdev->dev, "PCI device enable failed.\n");
4111 return err;
4112 }
4113
Ron Mercerebd6e772009-09-29 08:39:25 +00004114 qdev->ndev = ndev;
4115 qdev->pdev = pdev;
4116 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004117
Ron Mercerbc9167f2009-10-10 09:35:04 +00004118 /* Set PCIe read request size */
4119 err = pcie_set_readrq(pdev, 4096);
4120 if (err) {
4121 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004122 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004123 }
4124
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004125 err = pci_request_regions(pdev, DRV_NAME);
4126 if (err) {
4127 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004128 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129 }
4130
4131 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004132 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004133 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004134 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004135 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004136 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004137 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004138 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004139 }
4140
4141 if (err) {
4142 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004143 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004144 }
4145
Ron Mercer73475332009-11-06 07:44:58 +00004146 /* Set PCIe reset type for EEH to fundamental. */
4147 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004148 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004149 qdev->reg_base =
4150 ioremap_nocache(pci_resource_start(pdev, 1),
4151 pci_resource_len(pdev, 1));
4152 if (!qdev->reg_base) {
4153 dev_err(&pdev->dev, "Register mapping failed.\n");
4154 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004155 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004156 }
4157
4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4159 qdev->doorbell_area =
4160 ioremap_nocache(pci_resource_start(pdev, 3),
4161 pci_resource_len(pdev, 3));
4162 if (!qdev->doorbell_area) {
4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4164 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004165 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004166 }
4167
Ron Mercere4552f52009-06-09 05:39:32 +00004168 err = ql_get_board_info(qdev);
4169 if (err) {
4170 dev_err(&pdev->dev, "Register access failed.\n");
4171 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004172 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004173 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004174 qdev->msg_enable = netif_msg_init(debug, default_msg);
4175 spin_lock_init(&qdev->hw_lock);
4176 spin_lock_init(&qdev->stats_lock);
4177
4178 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004179 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004180 if (err) {
4181 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004182 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004183 }
4184
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4186
4187 /* Set up the default ring sizes. */
4188 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4189 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4190
4191 /* Set up the coalescing parameters. */
4192 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4193 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4194 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4195 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4196
4197 /*
4198 * Set up the operating parameters.
4199 */
4200 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004201 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4202 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4203 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4204 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004205 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004206 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004207 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004208
4209 if (!cards_found) {
4210 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4211 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4212 DRV_NAME, DRV_VERSION);
4213 }
4214 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004215err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004216 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004217err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004218 pci_disable_device(pdev);
4219 return err;
4220}
4221
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004222static const struct net_device_ops qlge_netdev_ops = {
4223 .ndo_open = qlge_open,
4224 .ndo_stop = qlge_close,
4225 .ndo_start_xmit = qlge_send,
4226 .ndo_change_mtu = qlge_change_mtu,
4227 .ndo_get_stats = qlge_get_stats,
4228 .ndo_set_multicast_list = qlge_set_multicast_list,
4229 .ndo_set_mac_address = qlge_set_mac_address,
4230 .ndo_validate_addr = eth_validate_addr,
4231 .ndo_tx_timeout = qlge_tx_timeout,
Ron Mercer01e6b952009-10-30 12:13:34 +00004232 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4233 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4234 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004235};
4236
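/* PCI probe entry point: allocate a multiqueue net_device, initialize the
 * adapter, advertise the offload features and register with the stack.
 */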
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004237static int __devinit qlge_probe(struct pci_dev *pdev,
4238 const struct pci_device_id *pci_entry)
4239{
4240 struct net_device *ndev = NULL;
4241 struct ql_adapter *qdev = NULL;
4242 static int cards_found = 0;
4243 int err = 0;
4244
Ron Mercer1e213302009-03-09 10:59:21 +00004245 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4246 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004247 if (!ndev)
4248 return -ENOMEM;
4249
4250 err = ql_init_device(pdev, ndev, cards_found);
4251 if (err < 0) {
4252 free_netdev(ndev);
4253 return err;
4254 }
4255
4256 qdev = netdev_priv(ndev);
4257 SET_NETDEV_DEV(ndev, &pdev->dev);
4258 ndev->features = (0
4259 | NETIF_F_IP_CSUM
4260 | NETIF_F_SG
4261 | NETIF_F_TSO
4262 | NETIF_F_TSO6
4263 | NETIF_F_TSO_ECN
4264 | NETIF_F_HW_VLAN_TX
4265 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004266 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004267
4268 if (test_bit(QL_DMA64, &qdev->flags))
4269 ndev->features |= NETIF_F_HIGHDMA;
4270
4271 /*
4272 * Set up net_device structure.
4273 */
4274 ndev->tx_queue_len = qdev->tx_ring_size;
4275 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004276
4277 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004278 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004279 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004280
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004281 err = register_netdev(ndev);
4282 if (err) {
4283 dev_err(&pdev->dev, "net device registration failed.\n");
4284 ql_release_all(pdev);
4285 pci_disable_device(pdev);
4286 return err;
4287 }
Ron Mercer6a473302009-07-02 06:06:12 +00004288 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004289 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004290 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004291 cards_found++;
4292 return 0;
4293}
4294
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004295netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4296{
4297 return qlge_send(skb, ndev);
4298}
4299
4300int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4301{
4302 return ql_clean_inbound_rx_ring(rx_ring, budget);
4303}
4304
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004305static void __devexit qlge_remove(struct pci_dev *pdev)
4306{
4307 struct net_device *ndev = pci_get_drvdata(pdev);
4308 unregister_netdev(ndev);
4309 ql_release_all(pdev);
4310 pci_disable_device(pdev);
4311 free_netdev(ndev);
4312}
4313
Ron Mercer6d190c62009-10-28 08:39:20 +00004314/* Clean up resources without touching hardware. */
4315static void ql_eeh_close(struct net_device *ndev)
4316{
4317 int i;
4318 struct ql_adapter *qdev = netdev_priv(ndev);
4319
4320 if (netif_carrier_ok(ndev)) {
4321 netif_carrier_off(ndev);
4322 netif_stop_queue(ndev);
4323 }
4324
4325 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4326 cancel_delayed_work_sync(&qdev->asic_reset_work);
4327 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4328 cancel_delayed_work_sync(&qdev->mpi_work);
4329 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4330 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4331
4332 for (i = 0; i < qdev->rss_ring_count; i++)
4333 netif_napi_del(&qdev->rx_ring[i].napi);
4334
4335 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4336 ql_tx_ring_clean(qdev);
4337 ql_free_rx_buffers(qdev);
4338 ql_release_adapter_resources(qdev);
4339}
4340
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004341/*
4342 * This callback is called by the PCI subsystem whenever
4343 * a PCI bus error is detected.
4344 */
4345static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4346 enum pci_channel_state state)
4347{
4348 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004349
Ron Mercer6d190c62009-10-28 08:39:20 +00004350 switch (state) {
4351 case pci_channel_io_normal:
4352 return PCI_ERS_RESULT_CAN_RECOVER;
4353 case pci_channel_io_frozen:
4354 netif_device_detach(ndev);
4355 if (netif_running(ndev))
4356 ql_eeh_close(ndev);
4357 pci_disable_device(pdev);
4358 return PCI_ERS_RESULT_NEED_RESET;
4359 case pci_channel_io_perm_failure:
4360 dev_err(&pdev->dev,
4361 "%s: pci_channel_io_perm_failure.\n", __func__);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004362 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004363 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004364
4365 /* Request a slot reset. */
4366 return PCI_ERS_RESULT_NEED_RESET;
4367}
4368
4369/*
4370 * This callback is called after the PCI bus has been reset.
4371 * Basically, this tries to restart the card from scratch.
4372 * This is a shortened version of the device probe/discovery code,
4373 * it resembles the first-half of the () routine.
4374 */
4375static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4376{
4377 struct net_device *ndev = pci_get_drvdata(pdev);
4378 struct ql_adapter *qdev = netdev_priv(ndev);
4379
Ron Mercer6d190c62009-10-28 08:39:20 +00004380 pdev->error_state = pci_channel_io_normal;
4381
4382 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004383 if (pci_enable_device(pdev)) {
4384 QPRINTK(qdev, IFUP, ERR,
4385 "Cannot re-enable PCI device after reset.\n");
4386 return PCI_ERS_RESULT_DISCONNECT;
4387 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004388 pci_set_master(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004389 return PCI_ERS_RESULT_RECOVERED;
4390}
4391
4392static void qlge_io_resume(struct pci_dev *pdev)
4393{
4394 struct net_device *ndev = pci_get_drvdata(pdev);
4395 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004396 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004397
Ron Mercer6d190c62009-10-28 08:39:20 +00004398 if (ql_adapter_reset(qdev))
4399 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004400 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004401 err = qlge_open(ndev);
4402 if (err) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004403 QPRINTK(qdev, IFUP, ERR,
4404 "Device initialization failed after reset.\n");
4405 return;
4406 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004407 } else {
4408 QPRINTK(qdev, IFUP, ERR,
4409 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004410 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004411 netif_device_attach(ndev);
4412}
4413
4414static struct pci_error_handlers qlge_err_handler = {
4415 .error_detected = qlge_io_error_detected,
4416 .slot_reset = qlge_io_slot_reset,
4417 .resume = qlge_io_resume,
4418};
4419
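/* Power-management suspend: detach the interface, bring the adapter down
 * if it is running, arm Wake-on-LAN and move the device to the requested
 * low-power state.
 */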
4420static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4421{
4422 struct net_device *ndev = pci_get_drvdata(pdev);
4423 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004424 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004425
4426 netif_device_detach(ndev);
4427
4428 if (netif_running(ndev)) {
4429 err = ql_adapter_down(qdev);
4430		if (err)
4431 return err;
4432 }
4433
Ron Mercerbc083ce2009-10-21 11:07:40 +00004434 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004435 err = pci_save_state(pdev);
4436 if (err)
4437 return err;
4438
4439 pci_disable_device(pdev);
4440
4441 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4442
4443 return 0;
4444}
4445
David S. Miller04da2cf2008-09-19 16:14:24 -07004446#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004447static int qlge_resume(struct pci_dev *pdev)
4448{
4449 struct net_device *ndev = pci_get_drvdata(pdev);
4450 struct ql_adapter *qdev = netdev_priv(ndev);
4451 int err;
4452
4453 pci_set_power_state(pdev, PCI_D0);
4454 pci_restore_state(pdev);
4455 err = pci_enable_device(pdev);
4456 if (err) {
4457 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4458 return err;
4459 }
4460 pci_set_master(pdev);
4461
4462 pci_enable_wake(pdev, PCI_D3hot, 0);
4463 pci_enable_wake(pdev, PCI_D3cold, 0);
4464
4465 if (netif_running(ndev)) {
4466 err = ql_adapter_up(qdev);
4467 if (err)
4468 return err;
4469 }
4470
4471 netif_device_attach(ndev);
4472
4473 return 0;
4474}
David S. Miller04da2cf2008-09-19 16:14:24 -07004475#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004476
4477static void qlge_shutdown(struct pci_dev *pdev)
4478{
4479 qlge_suspend(pdev, PMSG_SUSPEND);
4480}
4481
4482static struct pci_driver qlge_driver = {
4483 .name = DRV_NAME,
4484 .id_table = qlge_pci_tbl,
4485 .probe = qlge_probe,
4486 .remove = __devexit_p(qlge_remove),
4487#ifdef CONFIG_PM
4488 .suspend = qlge_suspend,
4489 .resume = qlge_resume,
4490#endif
4491 .shutdown = qlge_shutdown,
4492 .err_handler = &qlge_err_handler
4493};
4494
4495static int __init qlge_init_module(void)
4496{
4497 return pci_register_driver(&qlge_driver);
4498}
4499
4500static void __exit qlge_exit(void)
4501{
4502 pci_unregister_driver(&qlge_driver);
4503}
4504
4505module_init(qlge_init_module);
4506module_exit(qlge_exit);