/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
/* NETIF_MSG_TX_QUEUED | */
/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
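
/* Example (illustrative): both parameters above can be set at load time,
 * e.g. to force MSI interrupts and limit logging to driver/probe/link
 * messages:
 *
 *	modprobe qlge irq_type=1 debug=0x0007
 */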
75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85/* This hardware semaphore causes exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
120 return -EINVAL;
121 }
122
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
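
/* Typical lock/unlock pattern used elsewhere in this driver (see
 * ql_set_mac_addr() below):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... touch the shared MAC address registers ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */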
143
144/* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialize
146 * process, but is also used in kernel thread API such as
 * process, but is also used from process-context paths such as
148 */
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
172
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the mapping set up above. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}
219
	status = ql_wait_cfg(qdev, bit);
221 if (status) {
222 QPRINTK(qdev, IFUP, ERR,
223 "Timed out waiting for CFG to come ready.\n");
224 goto exit;
225 }
226
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
243
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400251 switch (type) {
252 case MAC_ADDR_TYPE_MULTI_MAC:
253 case MAC_ADDR_TYPE_CAM_MAC:
254 {
255 status =
256 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800257 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400258 if (status)
259 goto exit;
260 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261 (index << MAC_ADDR_IDX_SHIFT) | /* index */
262 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263 status =
264 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800265 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400266 if (status)
267 goto exit;
268 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269 status =
270 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800271 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400272 if (status)
273 goto exit;
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277 status =
278 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800279 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400280 if (status)
281 goto exit;
282 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 if (type == MAC_ADDR_TYPE_CAM_MAC) {
284 status =
285 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
Ron Mercer939678f2009-01-04 17:08:29 -0800294 MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 }
299 break;
300 }
301 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default:
304 QPRINTK(qdev, IFUP, CRIT,
305 "Address type %d not yet supported.\n", type);
306 status = -EPERM;
307 }
308exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400309 return status;
310}
311
312/* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
314 */
315static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316 u16 index)
317{
318 u32 offset = 0;
319 int status = 0;
320
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400321 switch (type) {
322 case MAC_ADDR_TYPE_MULTI_MAC:
Ron Mercer76b26692009-10-08 09:54:40 +0000323 {
324 u32 upper = (addr[0] << 8) | addr[1];
325 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326 (addr[4] << 8) | (addr[5]);
327
328 status =
329 ql_wait_reg_rdy(qdev,
330 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331 if (status)
332 goto exit;
333 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334 (index << MAC_ADDR_IDX_SHIFT) |
335 type | MAC_ADDR_E);
336 ql_write32(qdev, MAC_ADDR_DATA, lower);
337 status =
338 ql_wait_reg_rdy(qdev,
339 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340 if (status)
341 goto exit;
342 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343 (index << MAC_ADDR_IDX_SHIFT) |
344 type | MAC_ADDR_E);
345
346 ql_write32(qdev, MAC_ADDR_DATA, upper);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 break;
353 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400354 case MAC_ADDR_TYPE_CAM_MAC:
355 {
356 u32 cam_output;
357 u32 upper = (addr[0] << 8) | addr[1];
358 u32 lower =
359 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360 (addr[5]);
361
Ron Mercer49740972009-02-26 10:08:36 +0000362 QPRINTK(qdev, IFUP, DEBUG,
Johannes Berg7c510e42008-10-27 17:47:26 -0700363 "Adding %s address %pM"
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400364 " at index %d in the CAM.\n",
365 ((type ==
366 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
Johannes Berg7c510e42008-10-27 17:47:26 -0700367 "UNICAST"), addr, index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400368
369 status =
370 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800371 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400372 if (status)
373 goto exit;
374 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
375 (index << MAC_ADDR_IDX_SHIFT) | /* index */
376 type); /* type */
377 ql_write32(qdev, MAC_ADDR_DATA, lower);
378 status =
379 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400381 if (status)
382 goto exit;
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 type); /* type */
386 ql_write32(qdev, MAC_ADDR_DATA, upper);
387 status =
388 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400390 if (status)
391 goto exit;
392 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 type); /* type */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
398 */
Ron Mercer76b26692009-10-08 09:54:40 +0000399 cam_output = (CAM_OUT_ROUTE_NIC |
400 (qdev->
401 func << CAM_OUT_FUNC_SHIFT) |
402 (0 << CAM_OUT_CQ_ID_SHIFT));
403 if (qdev->vlgrp)
404 cam_output |= CAM_OUT_RV;
405 /* route to NIC core */
406 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400407 break;
408 }
409 case MAC_ADDR_TYPE_VLAN:
410 {
411 u32 enable_bit = *((u32 *) &addr[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
416 */
417 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit ? "Adding" : "Removing"),
419 index, (enable_bit ? "to" : "from"));
420
421 status =
422 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800423 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400424 if (status)
425 goto exit;
426 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
427 (index << MAC_ADDR_IDX_SHIFT) | /* index */
428 type | /* type */
429 enable_bit); /* enable/disable */
430 break;
431 }
432 case MAC_ADDR_TYPE_MULTI_FLTR:
433 default:
434 QPRINTK(qdev, IFUP, CRIT,
435 "Address type %d not yet supported.\n", type);
436 status = -EPERM;
437 }
438exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400439 return status;
440}
441
/* Set or clear MAC address in hardware. We sometimes
443 * have to clear it to prevent wrong frame routing
444 * especially in a bonding environment.
445 */
446static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
447{
448 int status;
449 char zero_mac_addr[ETH_ALEN];
450 char *addr;
451
452 if (set) {
453 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0];
461 QPRINTK(qdev, IFUP, DEBUG,
462 "Clearing MAC address on %s\n",
463 qdev->ndev->name);
464 }
465 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466 if (status)
467 return status;
468 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
469 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471 if (status)
472 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
473 "address.\n");
474 return status;
475}
476
void ql_link_on(struct ql_adapter *qdev)
478{
479 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
480 qdev->ndev->name);
481 netif_carrier_on(qdev->ndev);
482 ql_set_mac_addr(qdev, 1);
483}
484
485void ql_link_off(struct ql_adapter *qdev)
486{
487 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
488 qdev->ndev->name);
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
491}
492
/* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497{
498 int status = 0;
499
Ron Mercer939678f2009-01-04 17:08:29 -0800500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
Ron Mercer939678f2009-01-04 17:08:29 -0800506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400511 return status;
512}
513
514/* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521{
Ron Mercer8587ea32009-02-23 10:42:15 +0000522 int status = -EINVAL; /* Return error if no mask match. */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400523 u32 value = 0;
524
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400525 QPRINTK(qdev, IFUP, DEBUG,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable ? "Adding" : "Removing"),
528 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
530 ((index ==
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
533 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
534 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
535 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
536 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
537 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
539 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
540 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
541 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
542 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
543 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
544 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
545 (enable ? "to" : "from"));
546
547 switch (mask) {
548 case RT_IDX_CAM_HIT:
549 {
550 value = RT_IDX_DST_CAM_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
553 break;
554 }
555 case RT_IDX_VALID: /* Promiscuous Mode frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
560 break;
561 }
562 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
563 {
564 value = RT_IDX_DST_DFLT_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
567 break;
568 }
569 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
570 {
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
574 break;
575 }
576 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
577 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000578 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
581 break;
582 }
583 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
584 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000585 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400586 RT_IDX_TYPE_NICQ | /* type */
587 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588 break;
589 }
590 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
591 {
592 value = RT_IDX_DST_RSS | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
595 break;
596 }
597 case 0: /* Clear the E-bit on an entry. */
598 {
599 value = RT_IDX_DST_DFLT_Q | /* dest */
600 RT_IDX_TYPE_NICQ | /* type */
601 (index << RT_IDX_IDX_SHIFT);/* index */
602 break;
603 }
604 default:
605 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
606 mask);
607 status = -EPERM;
608 goto exit;
609 }
610
611 if (value) {
612 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
613 if (status)
614 goto exit;
615 value |= (enable ? RT_IDX_E : 0);
616 ql_write32(qdev, RT_IDX, value);
617 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618 }
619exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400620 return status;
621}
622
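/* Enable/disable the chip's global interrupt output (INTR_EN_EI).
 * The per-vector completion interrupts are managed separately by the
 * functions that follow.
 */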
623static void ql_enable_interrupts(struct ql_adapter *qdev)
624{
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
626}
627
628static void ql_disable_interrupts(struct ql_adapter *qdev)
629{
630 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
631}
632
633/* If we're running with multiple MSI-X vectors then we enable on the fly.
634 * Otherwise, we may have multiple outstanding workers and don't want to
635 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
637 * a worker finishes. Once it hits zero we enable the interrupt.
638 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
642 unsigned long hw_flags = 0;
643 struct intr_context *ctx = qdev->intr_context + intr;
644
645 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
646 /* Always enable if we're MSIX multi interrupts and
647 * it's not the default (zeroeth) interrupt.
648 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400649 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700650 ctx->intr_en_mask);
651 var = ql_read32(qdev, STS);
652 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400653 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700654
655 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
656 if (atomic_dec_and_test(&ctx->irq_cnt)) {
657 ql_write32(qdev, INTR_EN,
658 ctx->intr_en_mask);
659 var = ql_read32(qdev, STS);
660 }
661 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
662 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400663}
664
665static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
666{
667 u32 var = 0;
Ron Mercerbb0d2152008-10-20 10:30:26 -0700668 struct intr_context *ctx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400669
Ron Mercerbb0d2152008-10-20 10:30:26 -0700670 /* HW disables for us if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
672 */
673 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
674 return 0;
675
676 ctx = qdev->intr_context + intr;
Ron Mercer08b1bc82009-03-09 10:59:23 +0000677 spin_lock(&qdev->hw_lock);
Ron Mercerbb0d2152008-10-20 10:30:26 -0700678 if (!atomic_read(&ctx->irq_cnt)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400679 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700680 ctx->intr_dis_mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400681 var = ql_read32(qdev, STS);
682 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700683 atomic_inc(&ctx->irq_cnt);
Ron Mercer08b1bc82009-03-09 10:59:23 +0000684 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400685 return var;
686}
687
688static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
689{
690 int i;
691 for (i = 0; i < qdev->intr_count; i++) {
692 /* The enable call does a atomic_dec_and_test
693 * and enables only if the result is zero.
694 * So we precharge it here.
695 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700696 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
697 i == 0))
698 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400699 ql_enable_completion_interrupt(qdev, i);
700 }
701
702}
703
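/* Verify the signature string and 16-bit word checksum of the flash
 * image that was just read into qdev->flash.  Returns 0 on success,
 * non-zero on a bad signature or checksum.
 */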
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
705{
706 int status, i;
707 u16 csum = 0;
708 __le16 *flash = (__le16 *)&qdev->flash;
709
710 status = strncmp((char *)&qdev->flash, str, 4);
711 if (status) {
712 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
713 return status;
714 }
715
716 for (i = 0; i < size; i++)
717 csum += le16_to_cpu(*flash++);
718
719 if (csum)
720 QPRINTK(qdev, IFUP, ERR,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722
723 return csum;
724}
725
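/* Read one 32-bit word from flash at the given dword offset.  The
 * value is returned in the little-endian byte order used on the flash
 * part itself.
 */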
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
728 int status = 0;
729 /* wait for reg to come ready */
730 status = ql_wait_reg_rdy(qdev,
731 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
732 if (status)
733 goto exit;
734 /* set up for reg read */
735 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
736 /* wait for reg to come ready */
737 status = ql_wait_reg_rdy(qdev,
738 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
739 if (status)
740 goto exit;
Ron Mercer26351472009-02-02 13:53:57 -0800741 /* This data is stored on flash as an array of
742 * __le32. Since ql_read32() returns cpu endian
743 * we need to swap it back.
744 */
745 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400746exit:
747 return status;
748}
749
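/* Read this function's flash parameter block on 8000-series chips,
 * validate it, and copy the manufacturer (or BOFM-modified) MAC
 * address into the net_device.
 */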
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
751{
752 u32 i, size;
753 int status;
754 __le32 *p = (__le32 *)&qdev->flash;
755 u32 offset;
Ron Mercer542512e2009-06-09 05:39:33 +0000756 u8 mac_addr[6];
Ron Mercercdca8d02009-03-02 08:07:31 +0000757
758 /* Get flash offset for function and adjust
759 * for dword access.
760 */
Ron Mercere4552f52009-06-09 05:39:32 +0000761 if (!qdev->port)
Ron Mercercdca8d02009-03-02 08:07:31 +0000762 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
763 else
764 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
765
766 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
767 return -ETIMEDOUT;
768
769 size = sizeof(struct flash_params_8000) / sizeof(u32);
770 for (i = 0; i < size; i++, p++) {
771 status = ql_read_flash_word(qdev, i+offset, p);
772 if (status) {
773 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
774 goto exit;
775 }
776 }
777
778 status = ql_validate_flash(qdev,
779 sizeof(struct flash_params_8000) / sizeof(u16),
780 "8000");
781 if (status) {
782 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
783 status = -EINVAL;
784 goto exit;
785 }
786
Ron Mercer542512e2009-06-09 05:39:33 +0000787 /* Extract either manufacturer or BOFM modified
788 * MAC address.
789 */
790 if (qdev->flash.flash_params_8000.data_type1 == 2)
791 memcpy(mac_addr,
792 qdev->flash.flash_params_8000.mac_addr1,
793 qdev->ndev->addr_len);
794 else
795 memcpy(mac_addr,
796 qdev->flash.flash_params_8000.mac_addr,
797 qdev->ndev->addr_len);
798
799 if (!is_valid_ether_addr(mac_addr)) {
Ron Mercercdca8d02009-03-02 08:07:31 +0000800 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
801 status = -EINVAL;
802 goto exit;
803 }
804
805 memcpy(qdev->ndev->dev_addr,
Ron Mercer542512e2009-06-09 05:39:33 +0000806 mac_addr,
Ron Mercercdca8d02009-03-02 08:07:31 +0000807 qdev->ndev->addr_len);
808
809exit:
810 ql_sem_unlock(qdev, SEM_FLASH_MASK);
811 return status;
812}
813
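/* Read and validate the 8012-series flash parameter block and copy
 * its MAC address into the net_device.
 */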
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
816 int i;
817 int status;
Ron Mercer26351472009-02-02 13:53:57 -0800818 __le32 *p = (__le32 *)&qdev->flash;
Ron Mercere78f5fa2009-02-02 13:54:15 -0800819 u32 offset = 0;
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000820 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
Ron Mercere78f5fa2009-02-02 13:54:15 -0800821
822 /* Second function's parameters follow the first
823 * function's.
824 */
Ron Mercere4552f52009-06-09 05:39:32 +0000825 if (qdev->port)
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000826 offset = size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400827
828 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
829 return -ETIMEDOUT;
830
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000831 for (i = 0; i < size; i++, p++) {
Ron Mercere78f5fa2009-02-02 13:54:15 -0800832 status = ql_read_flash_word(qdev, i+offset, p);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400833 if (status) {
834 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
835 goto exit;
836 }
837
838 }
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000839
840 status = ql_validate_flash(qdev,
841 sizeof(struct flash_params_8012) / sizeof(u16),
842 "8012");
843 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
845 status = -EINVAL;
846 goto exit;
847 }
848
849 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 status = -EINVAL;
851 goto exit;
852 }
853
854 memcpy(qdev->ndev->dev_addr,
855 qdev->flash.flash_params_8012.mac_addr,
856 qdev->ndev->addr_len);
857
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400858exit:
859 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 return status;
861}
862
863/* xgmac register are located behind the xgmac_addr and xgmac_data
864 * register pair. Each read/write requires us to wait for the ready
865 * bit before reading/writing the data.
866 */
867static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
868{
869 int status;
870 /* wait for reg to come ready */
871 status = ql_wait_reg_rdy(qdev,
872 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
873 if (status)
874 return status;
875 /* write the data to the data reg */
876 ql_write32(qdev, XGMAC_DATA, data);
877 /* trigger the write */
878 ql_write32(qdev, XGMAC_ADDR, reg);
879 return status;
880}
881
882/* xgmac register are located behind the xgmac_addr and xgmac_data
883 * register pair. Each read/write requires us to wait for the ready
884 * bit before reading/writing the data.
885 */
886int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
887{
888 int status = 0;
889 /* wait for reg to come ready */
890 status = ql_wait_reg_rdy(qdev,
891 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
892 if (status)
893 goto exit;
894 /* set up for reg read */
895 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
896 /* wait for reg to come ready */
897 status = ql_wait_reg_rdy(qdev,
898 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 if (status)
900 goto exit;
901 /* get the data */
902 *data = ql_read32(qdev, XGMAC_DATA);
903exit:
904 return status;
905}
906
907/* This is used for reading the 64-bit statistics regs. */
908int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
909{
910 int status = 0;
911 u32 hi = 0;
912 u32 lo = 0;
913
914 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 if (status)
916 goto exit;
917
918 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 if (status)
920 goto exit;
921
922 *data = (u64) lo | ((u64) hi << 32);
923
924exit:
925 return status;
926}
927
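/* On 8000-series chips the port is brought up by the MPI firmware.
 * Here we only query the firmware version and state, then schedule
 * the worker that negotiates the TX/RX frame sizes.
 */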
static int ql_8000_port_initialize(struct ql_adapter *qdev)
929{
	int status;
	/*
	 * Get MPI firmware version for the driver banner
	 * and ethtool info.
	 */
935 status = ql_mb_about_fw(qdev);
936 if (status)
937 goto exit;
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +0000938 status = ql_mb_get_fw_state(qdev);
939 if (status)
940 goto exit;
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
943exit:
944 return status;
Ron Mercercdca8d02009-03-02 08:07:31 +0000945}
946
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400947/* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
951 * later date.
952 */
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000953static int ql_8012_port_initialize(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400954{
955 int status = 0;
956 u32 data;
957
958 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
961 */
962 QPRINTK(qdev, LINK, INFO,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965 if (status) {
966 QPRINTK(qdev, LINK, CRIT,
967 "Port initialize timed out.\n");
968 }
969 return status;
970 }
971
	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
973 /* Set the core reset. */
974 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975 if (status)
976 goto end;
977 data |= GLOBAL_CFG_RESET;
978 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 if (status)
980 goto end;
981
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
984 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
985 data |= GLOBAL_CFG_TX_STAT_EN;
986 data |= GLOBAL_CFG_RX_STAT_EN;
987 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 if (status)
989 goto end;
990
	/* Enable transmitter, and clear its reset. */
992 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
993 if (status)
994 goto end;
995 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
996 data |= TX_CFG_EN; /* Enable the transmitter. */
997 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 if (status)
999 goto end;
1000
	/* Enable receiver and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1003 if (status)
1004 goto end;
1005 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1006 data |= RX_CFG_EN; /* Enable the receiver. */
1007 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 if (status)
1009 goto end;
1010
1011 /* Turn on jumbo. */
1012 status =
1013 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 if (status)
1015 goto end;
1016 status =
1017 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 if (status)
1019 goto end;
1020
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1023end:
1024 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 return status;
1026}
1027
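/* Large receive buffers are carved out of a "master" page allocation
 * of order lbq_buf_order; this returns the size of that allocation.
 */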
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029{
1030 return PAGE_SIZE << qdev->lbq_buf_order;
1031}
1032
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001033/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
1036 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1037 rx_ring->lbq_curr_idx++;
1038 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1039 rx_ring->lbq_curr_idx = 0;
1040 rx_ring->lbq_free_cnt++;
1041 return lbq_desc;
1042}
1043
Ron Mercer7c734352009-10-19 03:32:19 +00001044static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045 struct rx_ring *rx_ring)
1046{
1047 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049 pci_dma_sync_single_for_cpu(qdev->pdev,
1050 pci_unmap_addr(lbq_desc, mapaddr),
1051 rx_ring->lbq_buf_size,
1052 PCI_DMA_FROMDEVICE);
1053
1054 /* If it's the last chunk of our master page then
1055 * we unmap it.
1056 */
1057 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058 == ql_lbq_block_size(qdev))
1059 pci_unmap_page(qdev->pdev,
1060 lbq_desc->p.pg_chunk.map,
1061 ql_lbq_block_size(qdev),
1062 PCI_DMA_FROMDEVICE);
1063 return lbq_desc;
1064}
1065
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001066/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
1069 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1070 rx_ring->sbq_curr_idx++;
1071 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1072 rx_ring->sbq_curr_idx = 0;
1073 rx_ring->sbq_free_cnt++;
1074 return sbq_desc;
1075}
1076
1077/* Update an rx ring index. */
1078static void ql_update_cq(struct rx_ring *rx_ring)
1079{
1080 rx_ring->cnsmr_idx++;
1081 rx_ring->curr_entry++;
1082 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1083 rx_ring->cnsmr_idx = 0;
1084 rx_ring->curr_entry = rx_ring->cq_base;
1085 }
1086}
1087
1088static void ql_write_cq_idx(struct rx_ring *rx_ring)
1089{
1090 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1091}
1092
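/* Carve the next lbq_buf_size chunk out of the ring's master page and
 * attach it to the given descriptor, allocating and DMA-mapping a
 * fresh master page when the previous one has been fully consumed.
 * Every chunk except the last takes an extra page reference so the
 * page survives until all of its chunks have been freed.
 */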
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
1095{
1096 if (!rx_ring->pg_chunk.page) {
1097 u64 map;
1098 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099 GFP_ATOMIC,
1100 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR,
1103 "page allocation failed.\n");
1104 return -ENOMEM;
1105 }
1106 rx_ring->pg_chunk.offset = 0;
1107 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108 0, ql_lbq_block_size(qdev),
1109 PCI_DMA_FROMDEVICE);
1110 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR,
1114 "PCI mapping failed.\n");
1115 return -ENOMEM;
1116 }
1117 rx_ring->pg_chunk.map = map;
1118 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 }
1120
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1123 */
1124 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126 /* Adjust the master page chunk for next
1127 * buffer get.
1128 */
1129 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131 rx_ring->pg_chunk.page = NULL;
1132 lbq_desc->p.pg_chunk.last_flag = 1;
1133 } else {
1134 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135 get_page(rx_ring->pg_chunk.page);
1136 lbq_desc->p.pg_chunk.last_flag = 0;
1137 }
1138 return 0;
1139}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001140/* Process (refill) a large buffer queue. */
1141static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1142{
Ron Mercer49f21862009-02-23 10:42:16 +00001143 u32 clean_idx = rx_ring->lbq_clean_idx;
1144 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001145 struct bq_desc *lbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001146 u64 map;
1147 int i;
1148
Ron Mercer7c734352009-10-19 03:32:19 +00001149 while (rx_ring->lbq_free_cnt > 32) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001150 for (i = 0; i < 16; i++) {
1151 QPRINTK(qdev, RX_STATUS, DEBUG,
1152 "lbq: try cleaning clean_idx = %d.\n",
1153 clean_idx);
1154 lbq_desc = &rx_ring->lbq[clean_idx];
Ron Mercer7c734352009-10-19 03:32:19 +00001155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1156 QPRINTK(qdev, IFUP, ERR,
1157 "Could not get a page chunk.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001158 return;
1159 }
Ron Mercer7c734352009-10-19 03:32:19 +00001160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001163 pci_unmap_addr_set(lbq_desc, mapaddr, map);
Ron Mercer7c734352009-10-19 03:32:19 +00001164 pci_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001166 *lbq_desc->addr = cpu_to_le64(map);
Ron Mercer7c734352009-10-19 03:32:19 +00001167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001171 clean_idx++;
1172 if (clean_idx == rx_ring->lbq_len)
1173 clean_idx = 0;
1174 }
1175
1176 rx_ring->lbq_clean_idx = clean_idx;
1177 rx_ring->lbq_prod_idx += 16;
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 rx_ring->lbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001180 rx_ring->lbq_free_cnt -= 16;
1181 }
1182
1183 if (start_idx != clean_idx) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001184 QPRINTK(qdev, RX_STATUS, DEBUG,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001189 }
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
Ron Mercer49f21862009-02-23 10:42:16 +00001195 u32 clean_idx = rx_ring->sbq_clean_idx;
1196 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001197 struct bq_desc *sbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001198 u64 map;
1199 int i;
1200
1201 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = 0; i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx];
1204 QPRINTK(qdev, RX_STATUS, DEBUG,
1205 "sbq: try cleaning clean_idx = %d.\n",
1206 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001207 if (sbq_desc->p.skb == NULL) {
1208 QPRINTK(qdev, RX_STATUS, DEBUG,
1209 "sbq: getting new skb for index %d.\n",
1210 sbq_desc->index);
1211 sbq_desc->p.skb =
1212 netdev_alloc_skb(qdev->ndev,
Ron Mercer52e55f32009-10-10 09:35:07 +00001213 SMALL_BUFFER_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001214 if (sbq_desc->p.skb == NULL) {
1215 QPRINTK(qdev, PROBE, ERR,
1216 "Couldn't get an skb.\n");
1217 rx_ring->sbq_clean_idx = clean_idx;
1218 return;
1219 }
1220 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1221 map = pci_map_single(qdev->pdev,
1222 sbq_desc->p.skb->data,
Ron Mercer52e55f32009-10-10 09:35:07 +00001223 rx_ring->sbq_buf_size,
1224 PCI_DMA_FROMDEVICE);
Ron Mercerc907a352009-01-04 17:06:46 -08001225 if (pci_dma_mapping_error(qdev->pdev, map)) {
1226 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
Ron Mercer06a3d512009-02-12 16:37:48 -08001228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
Ron Mercerc907a352009-01-04 17:06:46 -08001230 return;
1231 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001232 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1233 pci_unmap_len_set(sbq_desc, maplen,
Ron Mercer52e55f32009-10-10 09:35:07 +00001234 rx_ring->sbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001235 *sbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001236 }
1237
1238 clean_idx++;
1239 if (clean_idx == rx_ring->sbq_len)
1240 clean_idx = 0;
1241 }
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001246 rx_ring->sbq_free_cnt -= 16;
1247 }
1248
1249 if (start_idx != clean_idx) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001250 QPRINTK(qdev, RX_STATUS, DEBUG,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001255 }
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1260{
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1263}
1264
1265/* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271 int i;
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1274 /*
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
1282 */
1283 if (i == 7) {
1284 QPRINTK(qdev, TX_DONE, DEBUG,
1285 "unmapping OAL area.\n");
1286 }
1287 pci_unmap_single(qdev->pdev,
1288 pci_unmap_addr(&tx_ring_desc->map[i],
1289 mapaddr),
1290 pci_unmap_len(&tx_ring_desc->map[i],
1291 maplen),
1292 PCI_DMA_TODEVICE);
1293 } else {
1294 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1295 i);
1296 pci_unmap_page(qdev->pdev,
1297 pci_unmap_addr(&tx_ring_desc->map[i],
1298 mapaddr),
1299 pci_unmap_len(&tx_ring_desc->map[i],
1300 maplen), PCI_DMA_TODEVICE);
1301 }
1302 }
1303
1304}
1305
1306/* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308 */
1309static int ql_map_send(struct ql_adapter *qdev,
1310 struct ob_mac_iocb_req *mac_iocb_ptr,
1311 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312{
1313 int len = skb_headlen(skb);
1314 dma_addr_t map;
1315 int frag_idx, err, map_idx = 0;
1316 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319 if (frag_cnt) {
1320 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1321 }
1322 /*
1323 * Map the skb buffer first.
1324 */
1325 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1326
1327 err = pci_dma_mapping_error(qdev->pdev, map);
1328 if (err) {
1329 QPRINTK(qdev, TX_QUEUED, ERR,
1330 "PCI mapping failed with error: %d\n", err);
1331
1332 return NETDEV_TX_BUSY;
1333 }
1334
1335 tbd->len = cpu_to_le32(len);
1336 tbd->addr = cpu_to_le64(map);
1337 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1338 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1339 map_idx++;
1340
1341 /*
1342 * This loop fills the remainder of the 8 address descriptors
1343 * in the IOCB. If there are more than 7 fragments, then the
1344 * eighth address desc will point to an external list (OAL).
1345 * When this happens, the remainder of the frags will be stored
1346 * in this list.
1347 */
1348 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1349 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1350 tbd++;
1351 if (frag_idx == 6 && frag_cnt > 7) {
1352 /* Let's tack on an sglist.
1353 * Our control block will now
1354 * look like this:
1355 * iocb->seg[0] = skb->data
1356 * iocb->seg[1] = frag[0]
1357 * iocb->seg[2] = frag[1]
1358 * iocb->seg[3] = frag[2]
1359 * iocb->seg[4] = frag[3]
1360 * iocb->seg[5] = frag[4]
1361 * iocb->seg[6] = frag[5]
1362 * iocb->seg[7] = ptr to OAL (external sglist)
1363 * oal->seg[0] = frag[6]
1364 * oal->seg[1] = frag[7]
1365 * oal->seg[2] = frag[8]
1366 * oal->seg[3] = frag[9]
1367 * oal->seg[4] = frag[10]
1368 * etc...
1369 */
1370 /* Tack on the OAL in the eighth segment of IOCB. */
1371 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1372 sizeof(struct oal),
1373 PCI_DMA_TODEVICE);
1374 err = pci_dma_mapping_error(qdev->pdev, map);
1375 if (err) {
1376 QPRINTK(qdev, TX_QUEUED, ERR,
1377 "PCI mapping outbound address list with error: %d\n",
1378 err);
1379 goto map_error;
1380 }
1381
1382 tbd->addr = cpu_to_le64(map);
1383 /*
1384 * The length is the number of fragments
1385 * that remain to be mapped times the length
1386 * of our sglist (OAL).
1387 */
1388 tbd->len =
1389 cpu_to_le32((sizeof(struct tx_buf_desc) *
1390 (frag_cnt - frag_idx)) | TX_DESC_C);
1391 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1392 map);
1393 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1394 sizeof(struct oal));
1395 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1396 map_idx++;
1397 }
1398
1399 map =
1400 pci_map_page(qdev->pdev, frag->page,
1401 frag->page_offset, frag->size,
1402 PCI_DMA_TODEVICE);
1403
1404 err = pci_dma_mapping_error(qdev->pdev, map);
1405 if (err) {
1406 QPRINTK(qdev, TX_QUEUED, ERR,
1407 "PCI mapping frags failed with error: %d.\n",
1408 err);
1409 goto map_error;
1410 }
1411
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(frag->size);
1414 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 frag->size);
1417
1418 }
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1424
1425map_error:
1426 /*
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
 * so they can be unmapped.
1431 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1434}
1435
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001436static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001437{
1438 void *temp_addr = skb->data;
1439
1440 /* Undo the skb_reserve(skb,32) we did before
1441 * giving to hardware, and realign data on
1442 * a 2-byte boundary.
1443 */
1444 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1445 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1446 skb_copy_to_linear_data(skb, temp_addr,
1447 (unsigned int)len);
1448}
1449
1450/*
1451 * This function builds an skb for the given inbound
1452 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
1454 */
1455static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1456 struct rx_ring *rx_ring,
1457 struct ib_mac_iocb_rsp *ib_mac_rsp)
1458{
1459 struct bq_desc *lbq_desc;
1460 struct bq_desc *sbq_desc;
1461 struct sk_buff *skb = NULL;
1462 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1463 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1464
1465 /*
1466 * Handle the header buffer if present.
1467 */
1468 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1469 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1470 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1471 /*
1472 * Headers fit nicely into a small buffer.
1473 */
1474 sbq_desc = ql_get_curr_sbuf(rx_ring);
1475 pci_unmap_single(qdev->pdev,
1476 pci_unmap_addr(sbq_desc, mapaddr),
1477 pci_unmap_len(sbq_desc, maplen),
1478 PCI_DMA_FROMDEVICE);
1479 skb = sbq_desc->p.skb;
1480 ql_realign_skb(skb, hdr_len);
1481 skb_put(skb, hdr_len);
1482 sbq_desc->p.skb = NULL;
1483 }
1484
1485 /*
1486 * Handle the data buffer(s).
1487 */
1488 if (unlikely(!length)) { /* Is there data too? */
1489 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "No Data buffer in this packet.\n");
1491 return skb;
1492 }
1493
1494 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1495 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1496 QPRINTK(qdev, RX_STATUS, DEBUG,
1497 "Headers in small, data of %d bytes in small, combine them.\n", length);
1498 /*
1499 * Data is less than small buffer size so it's
1500 * stuffed in a small buffer.
1501 * For this case we append the data
1502 * from the "data" small buffer to the "header" small
1503 * buffer.
1504 */
1505 sbq_desc = ql_get_curr_sbuf(rx_ring);
1506 pci_dma_sync_single_for_cpu(qdev->pdev,
1507 pci_unmap_addr
1508 (sbq_desc, mapaddr),
1509 pci_unmap_len
1510 (sbq_desc, maplen),
1511 PCI_DMA_FROMDEVICE);
1512 memcpy(skb_put(skb, length),
1513 sbq_desc->p.skb->data, length);
1514 pci_dma_sync_single_for_device(qdev->pdev,
1515 pci_unmap_addr
1516 (sbq_desc,
1517 mapaddr),
1518 pci_unmap_len
1519 (sbq_desc,
1520 maplen),
1521 PCI_DMA_FROMDEVICE);
1522 } else {
1523 QPRINTK(qdev, RX_STATUS, DEBUG,
1524 "%d bytes in a single small buffer.\n", length);
1525 sbq_desc = ql_get_curr_sbuf(rx_ring);
1526 skb = sbq_desc->p.skb;
1527 ql_realign_skb(skb, length);
1528 skb_put(skb, length);
1529 pci_unmap_single(qdev->pdev,
1530 pci_unmap_addr(sbq_desc,
1531 mapaddr),
1532 pci_unmap_len(sbq_desc,
1533 maplen),
1534 PCI_DMA_FROMDEVICE);
1535 sbq_desc->p.skb = NULL;
1536 }
1537 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1538 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1539 QPRINTK(qdev, RX_STATUS, DEBUG,
1540 "Header in small, %d bytes in large. Chain large to small!\n", length);
1541 /*
1542 * The data is in a single large buffer. We
1543 * chain it to the header buffer's skb and let
1544 * it rip.
1545 */
Ron Mercer7c734352009-10-19 03:32:19 +00001546 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001547 QPRINTK(qdev, RX_STATUS, DEBUG,
Ron Mercer7c734352009-10-19 03:32:19 +00001548 "Chaining page at offset = %d,"
1549 "for %d bytes to skb.\n",
1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset,
1553 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001554 skb->len += length;
1555 skb->data_len += length;
1556 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001557 } else {
1558 /*
1559 * The headers and data are in a single large buffer. We
1560 * copy it to a new skb and let it go. This can happen with
1561 * jumbo mtu on a non-TCP/UDP frame.
1562 */
Ron Mercer7c734352009-10-19 03:32:19 +00001563 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001564 skb = netdev_alloc_skb(qdev->ndev, length);
1565 if (skb == NULL) {
1566 QPRINTK(qdev, PROBE, DEBUG,
1567 "No skb available, drop the packet.\n");
1568 return NULL;
1569 }
Ron Mercer4055c7d2009-01-04 17:07:09 -08001570 pci_unmap_page(qdev->pdev,
1571 pci_unmap_addr(lbq_desc,
1572 mapaddr),
1573 pci_unmap_len(lbq_desc, maplen),
1574 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001575 skb_reserve(skb, NET_IP_ALIGN);
1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
Ron Mercer7c734352009-10-19 03:32:19 +00001578 skb_fill_page_desc(skb, 0,
1579 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset,
1581 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001582 skb->len += length;
1583 skb->data_len += length;
1584 skb->truesize += length;
1585 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001586 __pskb_pull_tail(skb,
1587 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1588 VLAN_ETH_HLEN : ETH_HLEN);
1589 }
1590 } else {
1591 /*
1592 * The data is in a chain of large buffers
1593 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
1595 * buffer's skb.
1596 * frags: There are 18 max frags and our small
1597 * buffer will hold 32 of them. The thing is,
1598 * we'll use 3 max for our 9000 byte jumbo
1599 * frames. If the MTU goes up we could
1600 * eventually be in trouble.
1601 */
Ron Mercer7c734352009-10-19 03:32:19 +00001602 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001603 sbq_desc = ql_get_curr_sbuf(rx_ring);
1604 pci_unmap_single(qdev->pdev,
1605 pci_unmap_addr(sbq_desc, mapaddr),
1606 pci_unmap_len(sbq_desc, maplen),
1607 PCI_DMA_FROMDEVICE);
1608 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1609 /*
			 * This is a non TCP/UDP IP frame, so
1611 * the headers aren't split into a small
1612 * buffer. We have to use the small buffer
1613 * that contains our sg list as our skb to
1614 * send upstairs. Copy the sg list here to
1615 * a local buffer and use it to find the
1616 * pages to chain.
1617 */
1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1619 "%d bytes of headers & data in chain of large.\n", length);
1620 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001621 sbq_desc->p.skb = NULL;
1622 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001623 }
1624 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001625 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1626 size = (length < rx_ring->lbq_buf_size) ? length :
1627 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001628
1629 QPRINTK(qdev, RX_STATUS, DEBUG,
1630 "Adding page %d to skb for %d bytes.\n",
1631 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001632 skb_fill_page_desc(skb, i,
1633 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset,
1635 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001636 skb->len += size;
1637 skb->data_len += size;
1638 skb->truesize += size;
1639 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001640 i++;
1641 }
1642 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1643 VLAN_ETH_HLEN : ETH_HLEN);
1644 }
1645 return skb;
1646}
1647
1648/* Process an inbound completion from an rx ring. */
1649static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1650 struct rx_ring *rx_ring,
1651 struct ib_mac_iocb_rsp *ib_mac_rsp)
1652{
1653 struct net_device *ndev = qdev->ndev;
1654 struct sk_buff *skb = NULL;
	u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK);

1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1659
1660 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1661 if (unlikely(!skb)) {
1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1663 "No skb available, drop packet.\n");
1664 return;
1665 }
1666
Ron Mercera32959c2009-06-09 05:39:27 +00001667 /* Frame error, so drop the packet. */
1668 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1669 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1670 ib_mac_rsp->flags2);
1671 dev_kfree_skb_any(skb);
1672 return;
1673 }
Ron Mercerec33a492009-06-09 05:39:28 +00001674
1675 /* The max framesize filter on this chip is set higher than
1676 * MTU since FCoE uses 2k frames.
1677 */
1678 if (skb->len > ndev->mtu + ETH_HLEN) {
1679 dev_kfree_skb_any(skb);
1680 return;
1681 }
1682
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001683 prefetch(skb->data);
1684 skb->dev = ndev;
1685 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1686 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1687 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1688 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1689 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1690 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1691 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1693 }
1694 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1695 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1696 }
Ron Mercerd555f592009-03-09 10:59:19 +00001697
Ron Mercerd555f592009-03-09 10:59:19 +00001698 skb->protocol = eth_type_trans(skb, ndev);
1699 skb->ip_summed = CHECKSUM_NONE;
1700
1701 /* If rx checksum is on, and there are no
1702 * csum or frame errors.
1703 */
1704 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001705 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1706 /* TCP frame. */
1707 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1708 QPRINTK(qdev, RX_STATUS, DEBUG,
1709 "TCP checksum done!\n");
1710 skb->ip_summed = CHECKSUM_UNNECESSARY;
1711 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1712 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1713 /* Unfragmented ipv4 UDP frame. */
1714 struct iphdr *iph = (struct iphdr *) skb->data;
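			/* The hardware checksum is only meaningful for a
			 * complete datagram.  A fragmented UDP datagram
			 * carries a single checksum covering all fragments,
			 * so it cannot be validated per-frame; in that case
			 * ip_summed stays CHECKSUM_NONE and the stack
			 * verifies it.
			 */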
1715 if (!(iph->frag_off &
1716 cpu_to_be16(IP_MF|IP_OFFSET))) {
1717 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 QPRINTK(qdev, RX_STATUS, DEBUG,
1719				"UDP checksum done!\n");
1720 }
1721 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001722 }
Ron Mercerd555f592009-03-09 10:59:19 +00001723
Ajit Khapardebcc90f52009-10-07 02:46:09 +00001724 ndev->stats.rx_packets++;
1725 ndev->stats.rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001726 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001727 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1728 if (qdev->vlgrp &&
1729 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1730 (vlan_id != 0))
1731 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1732 vlan_id, skb);
1733 else
1734 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001735 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001736 if (qdev->vlgrp &&
1737 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1738 (vlan_id != 0))
1739 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1740 else
1741 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001742 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001743}
1744
1745/* Process an outbound completion from an rx ring. */
1746static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1747 struct ob_mac_iocb_rsp *mac_rsp)
1748{
Ajit Khapardebcc90f52009-10-07 02:46:09 +00001749 struct net_device *ndev = qdev->ndev;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001750 struct tx_ring *tx_ring;
1751 struct tx_ring_desc *tx_ring_desc;
1752
1753 QL_DUMP_OB_MAC_RSP(mac_rsp);
1754 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1755 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1756 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ajit Khapardebcc90f52009-10-07 02:46:09 +00001757 ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1758 ndev->stats.tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001759 dev_kfree_skb(tx_ring_desc->skb);
1760 tx_ring_desc->skb = NULL;
1761
1762 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1763 OB_MAC_IOCB_RSP_S |
1764 OB_MAC_IOCB_RSP_L |
1765 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1766 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1767 QPRINTK(qdev, TX_DONE, WARNING,
1768 "Total descriptor length did not match transfer length.\n");
1769 }
1770 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1771 QPRINTK(qdev, TX_DONE, WARNING,
1772 "Frame too short to be legal, not sent.\n");
1773 }
1774 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1775 QPRINTK(qdev, TX_DONE, WARNING,
1776 "Frame too long, but sent anyway.\n");
1777 }
1778 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1779 QPRINTK(qdev, TX_DONE, WARNING,
1780 "PCI backplane error. Frame not sent.\n");
1781 }
1782 }
1783 atomic_inc(&tx_ring->tx_count);
1784}
1785
1786/* Fire up a handler to reset the MPI processor. */
1787void ql_queue_fw_error(struct ql_adapter *qdev)
1788{
Ron Mercer6a473302009-07-02 06:06:12 +00001789 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001790 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1791}
1792
1793void ql_queue_asic_error(struct ql_adapter *qdev)
1794{
Ron Mercer6a473302009-07-02 06:06:12 +00001795 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001796 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08001797 /* Clear adapter up bit to signal the recovery
1798 * process that it shouldn't kill the reset worker
1799 * thread
1800 */
1801 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001802 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1803}
1804
1805static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1806 struct ib_ae_iocb_rsp *ib_ae_rsp)
1807{
1808 switch (ib_ae_rsp->event) {
1809 case MGMT_ERR_EVENT:
1810 QPRINTK(qdev, RX_ERR, ERR,
1811 "Management Processor Fatal Error.\n");
1812 ql_queue_fw_error(qdev);
1813 return;
1814
1815 case CAM_LOOKUP_ERR_EVENT:
1816 QPRINTK(qdev, LINK, ERR,
1817 "Multiple CAM hits lookup occurred.\n");
1818 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1819 ql_queue_asic_error(qdev);
1820 return;
1821
1822 case SOFT_ECC_ERROR_EVENT:
1823 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1824 ql_queue_asic_error(qdev);
1825 break;
1826
1827 case PCI_ERR_ANON_BUF_RD:
1828 QPRINTK(qdev, RX_ERR, ERR,
1829 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1830 ib_ae_rsp->q_id);
1831 ql_queue_asic_error(qdev);
1832 break;
1833
1834 default:
1835 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1836 ib_ae_rsp->event);
1837 ql_queue_asic_error(qdev);
1838 break;
1839 }
1840}
1841
1842static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1843{
1844 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001845 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001846 struct ob_mac_iocb_rsp *net_rsp = NULL;
1847 int count = 0;
1848
Ron Mercer1e213302009-03-09 10:59:21 +00001849 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001850 /* While there are entries in the completion queue. */
1851 while (prod != rx_ring->cnsmr_idx) {
1852
1853 QPRINTK(qdev, RX_STATUS, DEBUG,
1854			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1855 prod, rx_ring->cnsmr_idx);
1856
1857 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1858 rmb();
1859 switch (net_rsp->opcode) {
1860
1861 case OPCODE_OB_MAC_TSO_IOCB:
1862 case OPCODE_OB_MAC_IOCB:
1863 ql_process_mac_tx_intr(qdev, net_rsp);
1864 break;
1865 default:
1866 QPRINTK(qdev, RX_STATUS, DEBUG,
1867 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1868 net_rsp->opcode);
1869 }
1870 count++;
1871 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001872 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001873 }
1874 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00001875	tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
1876	if (net_rsp != NULL &&
1877	    __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001878 if (atomic_read(&tx_ring->queue_stopped) &&
1879 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1880 /*
1881 * The queue got stopped because the tx_ring was full.
1882 * Wake it up, because it's now at least 25% empty.
1883 */
Ron Mercer1e213302009-03-09 10:59:21 +00001884 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001885 }
1886
1887 return count;
1888}
1889
1890static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1891{
1892 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001893 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001894 struct ql_net_rsp_iocb *net_rsp;
1895 int count = 0;
1896
1897 /* While there are entries in the completion queue. */
1898 while (prod != rx_ring->cnsmr_idx) {
1899
1900 QPRINTK(qdev, RX_STATUS, DEBUG,
1901			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1902 prod, rx_ring->cnsmr_idx);
1903
1904 net_rsp = rx_ring->curr_entry;
1905 rmb();
1906 switch (net_rsp->opcode) {
1907 case OPCODE_IB_MAC_IOCB:
1908 ql_process_mac_rx_intr(qdev, rx_ring,
1909 (struct ib_mac_iocb_rsp *)
1910 net_rsp);
1911 break;
1912
1913 case OPCODE_IB_AE_IOCB:
1914 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1915 net_rsp);
1916 break;
1917 default:
1918 {
1919 QPRINTK(qdev, RX_STATUS, DEBUG,
1920 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1921 net_rsp->opcode);
1922 }
1923 }
1924 count++;
1925 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001926 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001927 if (count == budget)
1928 break;
1929 }
1930 ql_update_buffer_queues(qdev, rx_ring);
1931 ql_write_cq_idx(rx_ring);
1932 return count;
1933}
1934
1935static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1936{
1937 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1938 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00001939 struct rx_ring *trx_ring;
1940 int i, work_done = 0;
1941 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001942
1943 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1944 rx_ring->cq_id);
1945
Ron Mercer39aa8162009-08-27 11:02:11 +00001946 /* Service the TX rings first. They start
1947 * right after the RSS rings. */
1948 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1949 trx_ring = &qdev->rx_ring[i];
1950 /* If this TX completion ring belongs to this vector and
1951 * it's not empty then service it.
1952 */
1953 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1954 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1955 trx_ring->cnsmr_idx)) {
1956 QPRINTK(qdev, INTR, DEBUG,
1957 "%s: Servicing TX completion ring %d.\n",
1958 __func__, trx_ring->cq_id);
1959 ql_clean_outbound_rx_ring(trx_ring);
1960 }
1961 }
1962
1963 /*
1964 * Now service the RSS ring if it's active.
1965 */
1966 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1967 rx_ring->cnsmr_idx) {
1968 QPRINTK(qdev, INTR, DEBUG,
1969 "%s: Servicing RX completion ring %d.\n",
1970 __func__, rx_ring->cq_id);
1971 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1972 }
1973
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001974 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001975 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001976 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1977 }
1978 return work_done;
1979}
1980
1981static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1982{
1983 struct ql_adapter *qdev = netdev_priv(ndev);
1984
1985 qdev->vlgrp = grp;
1986 if (grp) {
1987 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1988 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1989 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1990 } else {
1991 QPRINTK(qdev, IFUP, DEBUG,
1992 "Turning off VLAN in NIC_RCV_CFG.\n");
1993 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1994 }
1995}
1996
1997static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1998{
1999 struct ql_adapter *qdev = netdev_priv(ndev);
2000 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002001 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002002
Ron Mercercc288f52009-02-23 10:42:14 +00002003 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2004 if (status)
2005 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002006 if (ql_set_mac_addr_reg
2007 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2008 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2009 }
Ron Mercercc288f52009-02-23 10:42:14 +00002010 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002011}
2012
2013static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2014{
2015 struct ql_adapter *qdev = netdev_priv(ndev);
2016 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002017 int status;
2018
2019 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2020 if (status)
2021 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002022
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002023 if (ql_set_mac_addr_reg
2024 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2025 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2026 }
Ron Mercercc288f52009-02-23 10:42:14 +00002027 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002028
2029}
2030
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002031/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2032static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2033{
2034 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002035 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002036 return IRQ_HANDLED;
2037}
2038
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002039/* This handles a fatal error, MPI activity, and the default
2040 * rx_ring in an MSI-X multiple vector environment.
2041 * In MSI/Legacy environment it also process the rest of
2042 * the rx_rings.
2043 */
2044static irqreturn_t qlge_isr(int irq, void *dev_id)
2045{
2046 struct rx_ring *rx_ring = dev_id;
2047 struct ql_adapter *qdev = rx_ring->qdev;
2048 struct intr_context *intr_context = &qdev->intr_context[0];
2049 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002050 int work_done = 0;
2051
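	/* irq_cnt is used elsewhere in the driver to track whether this
	 * vector is currently masked at the chip.  A non-zero count here
	 * is taken to mean the interrupt was raised by another device
	 * sharing the line, so it is not serviced.
	 */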
Ron Mercerbb0d2152008-10-20 10:30:26 -07002052 spin_lock(&qdev->hw_lock);
2053 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2054 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2055 spin_unlock(&qdev->hw_lock);
2056 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002057 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002058 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002059
Ron Mercerbb0d2152008-10-20 10:30:26 -07002060 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002061
2062 /*
2063 * Check for fatal error.
2064 */
2065 if (var & STS_FE) {
2066 ql_queue_asic_error(qdev);
2067 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2068 var = ql_read32(qdev, ERR_STS);
2069 QPRINTK(qdev, INTR, ERR,
2070 "Resetting chip. Error Status Register = 0x%x\n", var);
2071 return IRQ_HANDLED;
2072 }
2073
2074 /*
2075 * Check MPI processor activity.
2076 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002077 if ((var & STS_PI) &&
2078 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002079 /*
2080 * We've got an async event or mailbox completion.
2081 * Handle it and clear the source of the interrupt.
2082 */
2083 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2084 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002085 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2086 queue_delayed_work_on(smp_processor_id(),
2087 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002088 work_done++;
2089 }
2090
2091 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002092 * Get the bit-mask that shows the active queues for this
2093 * pass. Compare it to the queues that this irq services
2094 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002095 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002096 var = ql_read32(qdev, ISR1);
2097 if (var & intr_context->irq_mask) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002098 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002099 "Waking handler for rx_ring[0].\n");
2100 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ben Hutchings288379f2009-01-19 16:43:59 -08002101 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002102 work_done++;
2103 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002104 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002105 return work_done ? IRQ_HANDLED : IRQ_NONE;
2106}
2107
2108static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2109{
2110
2111 if (skb_is_gso(skb)) {
2112 int err;
2113 if (skb_header_cloned(skb)) {
2114 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2115 if (err)
2116 return err;
2117 }
2118
2119 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2120 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2121 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2122 mac_iocb_ptr->total_hdrs_len =
2123 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2124 mac_iocb_ptr->net_trans_offset =
2125 cpu_to_le16(skb_network_offset(skb) |
2126 skb_transport_offset(skb)
2127 << OB_MAC_TRANSPORT_HDR_SHIFT);
2128 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2129 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
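		/* Seed the TCP checksum field with the pseudo-header
		 * checksum (computed with a length of zero) so the hardware
		 * can complete it for each segment it generates -- the usual
		 * contract for LSO-capable NICs.
		 */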
2130 if (likely(skb->protocol == htons(ETH_P_IP))) {
2131 struct iphdr *iph = ip_hdr(skb);
2132 iph->check = 0;
2133 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2134 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2135 iph->daddr, 0,
2136 IPPROTO_TCP,
2137 0);
2138 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2139 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2140 tcp_hdr(skb)->check =
2141 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2142 &ipv6_hdr(skb)->daddr,
2143 0, IPPROTO_TCP, 0);
2144 }
2145 return 1;
2146 }
2147 return 0;
2148}
2149
2150static void ql_hw_csum_setup(struct sk_buff *skb,
2151 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2152{
2153 int len;
2154 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002155 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002156 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2157 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2158 mac_iocb_ptr->net_trans_offset =
2159 cpu_to_le16(skb_network_offset(skb) |
2160 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2161
2162 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2163 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2164 if (likely(iph->protocol == IPPROTO_TCP)) {
2165 check = &(tcp_hdr(skb)->check);
2166 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2167 mac_iocb_ptr->total_hdrs_len =
2168 cpu_to_le16(skb_transport_offset(skb) +
2169 (tcp_hdr(skb)->doff << 2));
2170 } else {
2171 check = &(udp_hdr(skb)->check);
2172 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2173 mac_iocb_ptr->total_hdrs_len =
2174 cpu_to_le16(skb_transport_offset(skb) +
2175 sizeof(struct udphdr));
2176 }
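	/* Store the pseudo-header checksum in the TCP/UDP checksum field;
	 * the chip is expected to fold in the payload checksum when the
	 * frame is transmitted.
	 */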
2177 *check = ~csum_tcpudp_magic(iph->saddr,
2178 iph->daddr, len, iph->protocol, 0);
2179}
2180
Stephen Hemminger613573252009-08-31 19:50:58 +00002181static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002182{
2183 struct tx_ring_desc *tx_ring_desc;
2184 struct ob_mac_iocb_req *mac_iocb_ptr;
2185 struct ql_adapter *qdev = netdev_priv(ndev);
2186 int tso;
2187 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002188 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002189
2190 tx_ring = &qdev->tx_ring[tx_ring_idx];
2191
Ron Mercer74c50b42009-03-09 10:59:27 +00002192 if (skb_padto(skb, ETH_ZLEN))
2193 return NETDEV_TX_OK;
2194
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002195 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2196 QPRINTK(qdev, TX_QUEUED, INFO,
2197			"%s: shutting down tx queue %d due to lack of resources.\n",
2198 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002199 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002200 atomic_inc(&tx_ring->queue_stopped);
2201 return NETDEV_TX_BUSY;
2202 }
2203 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2204 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002205 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002206
2207 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2208 mac_iocb_ptr->tid = tx_ring_desc->index;
2209 /* We use the upper 32-bits to store the tx queue for this IO.
2210 * When we get the completion we can use it to establish the context.
2211 */
2212 mac_iocb_ptr->txq_idx = tx_ring_idx;
2213 tx_ring_desc->skb = skb;
2214
2215 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2216
2217 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2218 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2219 vlan_tx_tag_get(skb));
2220 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2221 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2222 }
2223 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2224 if (tso < 0) {
2225 dev_kfree_skb_any(skb);
2226 return NETDEV_TX_OK;
2227 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2228 ql_hw_csum_setup(skb,
2229 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2230 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002231 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2232 NETDEV_TX_OK) {
2233 QPRINTK(qdev, TX_QUEUED, ERR,
2234 "Could not map the segments.\n");
2235 return NETDEV_TX_BUSY;
2236 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002237 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2238 tx_ring->prod_idx++;
2239 if (tx_ring->prod_idx == tx_ring->wq_len)
2240 tx_ring->prod_idx = 0;
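	/* Make sure the IOCB contents written above are visible before
	 * the producer index doorbell write below.
	 */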
2241 wmb();
2242
2243 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002244 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2245 tx_ring->prod_idx, skb->len);
2246
2247 atomic_dec(&tx_ring->tx_count);
2248 return NETDEV_TX_OK;
2249}
2250
2251static void ql_free_shadow_space(struct ql_adapter *qdev)
2252{
2253 if (qdev->rx_ring_shadow_reg_area) {
2254 pci_free_consistent(qdev->pdev,
2255 PAGE_SIZE,
2256 qdev->rx_ring_shadow_reg_area,
2257 qdev->rx_ring_shadow_reg_dma);
2258 qdev->rx_ring_shadow_reg_area = NULL;
2259 }
2260 if (qdev->tx_ring_shadow_reg_area) {
2261 pci_free_consistent(qdev->pdev,
2262 PAGE_SIZE,
2263 qdev->tx_ring_shadow_reg_area,
2264 qdev->tx_ring_shadow_reg_dma);
2265 qdev->tx_ring_shadow_reg_area = NULL;
2266 }
2267}
2268
2269static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2270{
2271 qdev->rx_ring_shadow_reg_area =
2272 pci_alloc_consistent(qdev->pdev,
2273 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2274 if (qdev->rx_ring_shadow_reg_area == NULL) {
2275 QPRINTK(qdev, IFUP, ERR,
2276 "Allocation of RX shadow space failed.\n");
2277 return -ENOMEM;
2278 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002279 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002280 qdev->tx_ring_shadow_reg_area =
2281 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2282 &qdev->tx_ring_shadow_reg_dma);
2283 if (qdev->tx_ring_shadow_reg_area == NULL) {
2284 QPRINTK(qdev, IFUP, ERR,
2285 "Allocation of TX shadow space failed.\n");
2286 goto err_wqp_sh_area;
2287 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002288 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002289 return 0;
2290
2291err_wqp_sh_area:
2292 pci_free_consistent(qdev->pdev,
2293 PAGE_SIZE,
2294 qdev->rx_ring_shadow_reg_area,
2295 qdev->rx_ring_shadow_reg_dma);
2296 return -ENOMEM;
2297}
2298
2299static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2300{
2301 struct tx_ring_desc *tx_ring_desc;
2302 int i;
2303 struct ob_mac_iocb_req *mac_iocb_ptr;
2304
2305 mac_iocb_ptr = tx_ring->wq_base;
2306 tx_ring_desc = tx_ring->q;
2307 for (i = 0; i < tx_ring->wq_len; i++) {
2308 tx_ring_desc->index = i;
2309 tx_ring_desc->skb = NULL;
2310 tx_ring_desc->queue_entry = mac_iocb_ptr;
2311 mac_iocb_ptr++;
2312 tx_ring_desc++;
2313 }
2314 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2315 atomic_set(&tx_ring->queue_stopped, 0);
2316}
2317
2318static void ql_free_tx_resources(struct ql_adapter *qdev,
2319 struct tx_ring *tx_ring)
2320{
2321 if (tx_ring->wq_base) {
2322 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2323 tx_ring->wq_base, tx_ring->wq_base_dma);
2324 tx_ring->wq_base = NULL;
2325 }
2326 kfree(tx_ring->q);
2327 tx_ring->q = NULL;
2328}
2329
2330static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2331 struct tx_ring *tx_ring)
2332{
2333 tx_ring->wq_base =
2334 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2335 &tx_ring->wq_base_dma);
2336
2337 if ((tx_ring->wq_base == NULL)
Ron Mercer88c55e32009-06-10 15:49:33 +00002338 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002339 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2340 return -ENOMEM;
2341 }
2342 tx_ring->q =
2343 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2344 if (tx_ring->q == NULL)
2345 goto err;
2346
2347 return 0;
2348err:
2349 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2350 tx_ring->wq_base, tx_ring->wq_base_dma);
2351 return -ENOMEM;
2352}
2353
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002354static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002355{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002356 struct bq_desc *lbq_desc;
2357
Ron Mercer7c734352009-10-19 03:32:19 +00002358 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002359
Ron Mercer7c734352009-10-19 03:32:19 +00002360 curr_idx = rx_ring->lbq_curr_idx;
2361 clean_idx = rx_ring->lbq_clean_idx;
2362 while (curr_idx != clean_idx) {
2363 lbq_desc = &rx_ring->lbq[curr_idx];
2364
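		/* Large buffers are chunks carved out of shared pages.
		 * Only the descriptor holding the last chunk of a page
		 * unmaps the DMA block; every descriptor still drops its
		 * own page reference below.
		 */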
2365 if (lbq_desc->p.pg_chunk.last_flag) {
2366 pci_unmap_page(qdev->pdev,
2367 lbq_desc->p.pg_chunk.map,
2368 ql_lbq_block_size(qdev),
2369 PCI_DMA_FROMDEVICE);
2370 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002371 }
Ron Mercer7c734352009-10-19 03:32:19 +00002372
2373 put_page(lbq_desc->p.pg_chunk.page);
2374 lbq_desc->p.pg_chunk.page = NULL;
2375
2376 if (++curr_idx == rx_ring->lbq_len)
2377 curr_idx = 0;
2378
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002379 }
2380}
2381
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002382static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002383{
2384 int i;
2385 struct bq_desc *sbq_desc;
2386
2387 for (i = 0; i < rx_ring->sbq_len; i++) {
2388 sbq_desc = &rx_ring->sbq[i];
2389 if (sbq_desc == NULL) {
2390 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2391 return;
2392 }
2393 if (sbq_desc->p.skb) {
2394 pci_unmap_single(qdev->pdev,
2395 pci_unmap_addr(sbq_desc, mapaddr),
2396 pci_unmap_len(sbq_desc, maplen),
2397 PCI_DMA_FROMDEVICE);
2398 dev_kfree_skb(sbq_desc->p.skb);
2399 sbq_desc->p.skb = NULL;
2400 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002401 }
2402}
2403
Ron Mercer4545a3f2009-02-23 10:42:17 +00002404/* Free all large and small rx buffers associated
2405 * with the completion queues for this device.
2406 */
2407static void ql_free_rx_buffers(struct ql_adapter *qdev)
2408{
2409 int i;
2410 struct rx_ring *rx_ring;
2411
2412 for (i = 0; i < qdev->rx_ring_count; i++) {
2413 rx_ring = &qdev->rx_ring[i];
2414 if (rx_ring->lbq)
2415 ql_free_lbq_buffers(qdev, rx_ring);
2416 if (rx_ring->sbq)
2417 ql_free_sbq_buffers(qdev, rx_ring);
2418 }
2419}
2420
2421static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2422{
2423 struct rx_ring *rx_ring;
2424 int i;
2425
2426 for (i = 0; i < qdev->rx_ring_count; i++) {
2427 rx_ring = &qdev->rx_ring[i];
2428 if (rx_ring->type != TX_Q)
2429 ql_update_buffer_queues(qdev, rx_ring);
2430 }
2431}
2432
2433static void ql_init_lbq_ring(struct ql_adapter *qdev,
2434 struct rx_ring *rx_ring)
2435{
2436 int i;
2437 struct bq_desc *lbq_desc;
2438 __le64 *bq = rx_ring->lbq_base;
2439
2440 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2441 for (i = 0; i < rx_ring->lbq_len; i++) {
2442 lbq_desc = &rx_ring->lbq[i];
2443 memset(lbq_desc, 0, sizeof(*lbq_desc));
2444 lbq_desc->index = i;
2445 lbq_desc->addr = bq;
2446 bq++;
2447 }
2448}
2449
2450static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002451 struct rx_ring *rx_ring)
2452{
2453 int i;
2454 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002455 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002456
Ron Mercer4545a3f2009-02-23 10:42:17 +00002457 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002458 for (i = 0; i < rx_ring->sbq_len; i++) {
2459 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002460 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002461 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002462 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002463 bq++;
2464 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002465}
2466
2467static void ql_free_rx_resources(struct ql_adapter *qdev,
2468 struct rx_ring *rx_ring)
2469{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002470 /* Free the small buffer queue. */
2471 if (rx_ring->sbq_base) {
2472 pci_free_consistent(qdev->pdev,
2473 rx_ring->sbq_size,
2474 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2475 rx_ring->sbq_base = NULL;
2476 }
2477
2478 /* Free the small buffer queue control blocks. */
2479 kfree(rx_ring->sbq);
2480 rx_ring->sbq = NULL;
2481
2482 /* Free the large buffer queue. */
2483 if (rx_ring->lbq_base) {
2484 pci_free_consistent(qdev->pdev,
2485 rx_ring->lbq_size,
2486 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2487 rx_ring->lbq_base = NULL;
2488 }
2489
2490 /* Free the large buffer queue control blocks. */
2491 kfree(rx_ring->lbq);
2492 rx_ring->lbq = NULL;
2493
2494 /* Free the rx queue. */
2495 if (rx_ring->cq_base) {
2496 pci_free_consistent(qdev->pdev,
2497 rx_ring->cq_size,
2498 rx_ring->cq_base, rx_ring->cq_base_dma);
2499 rx_ring->cq_base = NULL;
2500 }
2501}
2502
2503/* Allocate queues and buffers for this completion queue based
2504 * on the values in the parameter structure. */
2505static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2506 struct rx_ring *rx_ring)
2507{
2508
2509 /*
2510 * Allocate the completion queue for this rx_ring.
2511 */
2512 rx_ring->cq_base =
2513 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2514 &rx_ring->cq_base_dma);
2515
2516 if (rx_ring->cq_base == NULL) {
2517 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2518 return -ENOMEM;
2519 }
2520
2521 if (rx_ring->sbq_len) {
2522 /*
2523 * Allocate small buffer queue.
2524 */
2525 rx_ring->sbq_base =
2526 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2527 &rx_ring->sbq_base_dma);
2528
2529 if (rx_ring->sbq_base == NULL) {
2530 QPRINTK(qdev, IFUP, ERR,
2531 "Small buffer queue allocation failed.\n");
2532 goto err_mem;
2533 }
2534
2535 /*
2536 * Allocate small buffer queue control blocks.
2537 */
2538 rx_ring->sbq =
2539 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2540 GFP_KERNEL);
2541 if (rx_ring->sbq == NULL) {
2542 QPRINTK(qdev, IFUP, ERR,
2543 "Small buffer queue control block allocation failed.\n");
2544 goto err_mem;
2545 }
2546
Ron Mercer4545a3f2009-02-23 10:42:17 +00002547 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002548 }
2549
2550 if (rx_ring->lbq_len) {
2551 /*
2552 * Allocate large buffer queue.
2553 */
2554 rx_ring->lbq_base =
2555 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2556 &rx_ring->lbq_base_dma);
2557
2558 if (rx_ring->lbq_base == NULL) {
2559 QPRINTK(qdev, IFUP, ERR,
2560 "Large buffer queue allocation failed.\n");
2561 goto err_mem;
2562 }
2563 /*
2564 * Allocate large buffer queue control blocks.
2565 */
2566 rx_ring->lbq =
2567 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2568 GFP_KERNEL);
2569 if (rx_ring->lbq == NULL) {
2570 QPRINTK(qdev, IFUP, ERR,
2571 "Large buffer queue control block allocation failed.\n");
2572 goto err_mem;
2573 }
2574
Ron Mercer4545a3f2009-02-23 10:42:17 +00002575 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002576 }
2577
2578 return 0;
2579
2580err_mem:
2581 ql_free_rx_resources(qdev, rx_ring);
2582 return -ENOMEM;
2583}
2584
2585static void ql_tx_ring_clean(struct ql_adapter *qdev)
2586{
2587 struct tx_ring *tx_ring;
2588 struct tx_ring_desc *tx_ring_desc;
2589 int i, j;
2590
2591 /*
2592 * Loop through all queues and free
2593 * any resources.
2594 */
2595 for (j = 0; j < qdev->tx_ring_count; j++) {
2596 tx_ring = &qdev->tx_ring[j];
2597 for (i = 0; i < tx_ring->wq_len; i++) {
2598 tx_ring_desc = &tx_ring->q[i];
2599 if (tx_ring_desc && tx_ring_desc->skb) {
2600 QPRINTK(qdev, IFDOWN, ERR,
2601 "Freeing lost SKB %p, from queue %d, index %d.\n",
2602 tx_ring_desc->skb, j,
2603 tx_ring_desc->index);
2604 ql_unmap_send(qdev, tx_ring_desc,
2605 tx_ring_desc->map_cnt);
2606 dev_kfree_skb(tx_ring_desc->skb);
2607 tx_ring_desc->skb = NULL;
2608 }
2609 }
2610 }
2611}
2612
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002613static void ql_free_mem_resources(struct ql_adapter *qdev)
2614{
2615 int i;
2616
2617 for (i = 0; i < qdev->tx_ring_count; i++)
2618 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2619 for (i = 0; i < qdev->rx_ring_count; i++)
2620 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2621 ql_free_shadow_space(qdev);
2622}
2623
2624static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2625{
2626 int i;
2627
2628 /* Allocate space for our shadow registers and such. */
2629 if (ql_alloc_shadow_space(qdev))
2630 return -ENOMEM;
2631
2632 for (i = 0; i < qdev->rx_ring_count; i++) {
2633 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2634 QPRINTK(qdev, IFUP, ERR,
2635 "RX resource allocation failed.\n");
2636 goto err_mem;
2637 }
2638 }
2639 /* Allocate tx queue resources */
2640 for (i = 0; i < qdev->tx_ring_count; i++) {
2641 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2642 QPRINTK(qdev, IFUP, ERR,
2643 "TX resource allocation failed.\n");
2644 goto err_mem;
2645 }
2646 }
2647 return 0;
2648
2649err_mem:
2650 ql_free_mem_resources(qdev);
2651 return -ENOMEM;
2652}
2653
2654/* Set up the rx ring control block and pass it to the chip.
2655 * The control block is defined as
2656 * "Completion Queue Initialization Control Block", or cqicb.
2657 */
2658static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2659{
2660 struct cqicb *cqicb = &rx_ring->cqicb;
2661 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002662 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002663 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002664 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002665 void __iomem *doorbell_area =
2666 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2667 int err = 0;
2668 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002669 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002670 __le64 *base_indirect_ptr;
2671 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002672
2673 /* Set up the shadow registers for this ring. */
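	/* Each ring gets a slice of the shadow page: 8 bytes for the
	 * completion queue producer index that the chip writes back to
	 * host memory, followed by the page-address lists for the large
	 * and small buffer queues.
	 */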
2674 rx_ring->prod_idx_sh_reg = shadow_reg;
2675 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00002676 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002677 shadow_reg += sizeof(u64);
2678 shadow_reg_dma += sizeof(u64);
2679 rx_ring->lbq_base_indirect = shadow_reg;
2680 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002681 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2682 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002683 rx_ring->sbq_base_indirect = shadow_reg;
2684 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2685
2686 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002687 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002688 rx_ring->cnsmr_idx = 0;
2689 rx_ring->curr_entry = rx_ring->cq_base;
2690
2691 /* PCI doorbell mem area + 0x04 for valid register */
2692 rx_ring->valid_db_reg = doorbell_area + 0x04;
2693
2694 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002695 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002696
2697 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002698 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002699
2700 memset((void *)cqicb, 0, sizeof(struct cqicb));
2701 cqicb->msix_vect = rx_ring->irq;
2702
Ron Mercer459caf52009-01-04 17:08:11 -08002703 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2704 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002705
Ron Mercer97345522009-01-09 11:31:50 +00002706 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002707
Ron Mercer97345522009-01-09 11:31:50 +00002708 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002709
2710 /*
2711 * Set up the control block load flags.
2712 */
2713 cqicb->flags = FLAGS_LC | /* Load queue base address */
2714 FLAGS_LV | /* Load MSI-X vector */
2715 FLAGS_LI; /* Load irq delay values */
2716 if (rx_ring->lbq_len) {
2717 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002718 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002719 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2720 page_entries = 0;
2721 do {
2722 *base_indirect_ptr = cpu_to_le64(tmp);
2723 tmp += DB_PAGE_SIZE;
2724 base_indirect_ptr++;
2725 page_entries++;
2726 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002727 cqicb->lbq_addr =
2728 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002729 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2730 (u16) rx_ring->lbq_buf_size;
2731 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2732 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2733 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002734 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002735 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002736 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002737 rx_ring->lbq_clean_idx = 0;
2738 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002739 }
2740 if (rx_ring->sbq_len) {
2741 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002742 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002743 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2744 page_entries = 0;
2745 do {
2746 *base_indirect_ptr = cpu_to_le64(tmp);
2747 tmp += DB_PAGE_SIZE;
2748 base_indirect_ptr++;
2749 page_entries++;
2750 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002751 cqicb->sbq_addr =
2752 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002753 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00002754 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08002755 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2756 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002757 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002758 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002759 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002760 rx_ring->sbq_clean_idx = 0;
2761 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002762 }
2763 switch (rx_ring->type) {
2764 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002765 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2766 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2767 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002768 case RX_Q:
2769 /* Inbound completion handling rx_rings run in
2770 * separate NAPI contexts.
2771 */
2772 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2773 64);
2774 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2775 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2776 break;
2777 default:
2778 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2779 rx_ring->type);
2780 }
Ron Mercer49740972009-02-26 10:08:36 +00002781 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002782 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2783 CFG_LCQ, rx_ring->cq_id);
2784 if (err) {
2785 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2786 return err;
2787 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002788 return err;
2789}
2790
2791static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2792{
2793 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2794 void __iomem *doorbell_area =
2795 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2796 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2797 (tx_ring->wq_id * sizeof(u64));
2798 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2799 (tx_ring->wq_id * sizeof(u64));
2800 int err = 0;
2801
2802 /*
2803 * Assign doorbell registers for this tx_ring.
2804 */
2805 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002806 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002807 tx_ring->prod_idx = 0;
2808 /* TX PCI doorbell mem area + 0x04 */
2809 tx_ring->valid_db_reg = doorbell_area + 0x04;
2810
2811 /*
2812 * Assign shadow registers for this tx_ring.
2813 */
2814 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2815 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2816
2817 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2818 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2819 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2820 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2821 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002822 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002823
Ron Mercer97345522009-01-09 11:31:50 +00002824 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002825
2826 ql_init_tx_ring(qdev, tx_ring);
2827
Ron Mercere3324712009-07-02 06:06:13 +00002828 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002829 (u16) tx_ring->wq_id);
2830 if (err) {
2831 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2832 return err;
2833 }
Ron Mercer49740972009-02-26 10:08:36 +00002834 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002835 return err;
2836}
2837
2838static void ql_disable_msix(struct ql_adapter *qdev)
2839{
2840 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2841 pci_disable_msix(qdev->pdev);
2842 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2843 kfree(qdev->msi_x_entry);
2844 qdev->msi_x_entry = NULL;
2845 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2846 pci_disable_msi(qdev->pdev);
2847 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2848 }
2849}
2850
Ron Mercera4ab6132009-08-27 11:02:10 +00002851/* We start by trying to get the number of vectors
2852 * stored in qdev->intr_count. If we don't get that
2853 * many then we reduce the count and try again.
2854 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002855static void ql_enable_msix(struct ql_adapter *qdev)
2856{
Ron Mercera4ab6132009-08-27 11:02:10 +00002857 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002858
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002859 /* Get the MSIX vectors. */
2860 if (irq_type == MSIX_IRQ) {
2861 /* Try to alloc space for the msix struct,
2862 * if it fails then go to MSI/legacy.
2863 */
Ron Mercera4ab6132009-08-27 11:02:10 +00002864 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002865 sizeof(struct msix_entry),
2866 GFP_KERNEL);
2867 if (!qdev->msi_x_entry) {
2868 irq_type = MSI_IRQ;
2869 goto msi;
2870 }
2871
Ron Mercera4ab6132009-08-27 11:02:10 +00002872 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002873 qdev->msi_x_entry[i].entry = i;
2874
Ron Mercera4ab6132009-08-27 11:02:10 +00002875 /* Loop to get our vectors. We start with
2876 * what we want and settle for what we get.
2877 */
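		/* pci_enable_msix() returns 0 on success, a negative errno
		 * on failure, or a positive count of the vectors actually
		 * available, in which case we retry with the reduced count.
		 */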
2878 do {
2879 err = pci_enable_msix(qdev->pdev,
2880 qdev->msi_x_entry, qdev->intr_count);
2881 if (err > 0)
2882 qdev->intr_count = err;
2883 } while (err > 0);
2884
2885 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002886 kfree(qdev->msi_x_entry);
2887 qdev->msi_x_entry = NULL;
2888 QPRINTK(qdev, IFUP, WARNING,
2889 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00002890 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002891 irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00002892 } else if (err == 0) {
2893 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2894 QPRINTK(qdev, IFUP, INFO,
2895 "MSI-X Enabled, got %d vectors.\n",
2896 qdev->intr_count);
2897 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002898 }
2899 }
2900msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00002901 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002902 if (irq_type == MSI_IRQ) {
2903 if (!pci_enable_msi(qdev->pdev)) {
2904 set_bit(QL_MSI_ENABLED, &qdev->flags);
2905 QPRINTK(qdev, IFUP, INFO,
2906 "Running with MSI interrupts.\n");
2907 return;
2908 }
2909 }
2910 irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002911 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2912}
2913
Ron Mercer39aa8162009-08-27 11:02:11 +00002914/* Each vector services 1 RSS ring and 1 or more
2915 * TX completion rings. This function loops through
2916 * the TX completion rings and assigns the vector that
2917 * will service it. An example would be if there are
2918 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2919 * This would mean that vector 0 would service RSS ring 0
2920 * and TX completion rings 0,1,2 and 3. Vector 1 would
2921 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2922 */
2923static void ql_set_tx_vect(struct ql_adapter *qdev)
2924{
2925 int i, j, vect;
2926 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2927
2928 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2929 /* Assign irq vectors to TX rx_rings.*/
2930 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2931 i < qdev->rx_ring_count; i++) {
2932 if (j == tx_rings_per_vector) {
2933 vect++;
2934 j = 0;
2935 }
2936 qdev->rx_ring[i].irq = vect;
2937 j++;
2938 }
2939 } else {
2940 /* For single vector all rings have an irq
2941 * of zero.
2942 */
2943 for (i = 0; i < qdev->rx_ring_count; i++)
2944 qdev->rx_ring[i].irq = 0;
2945 }
2946}
2947
2948/* Set the interrupt mask for this vector. Each vector
2949 * will service 1 RSS ring and 1 or more TX completion
2950 * rings. This function sets up a bit mask per vector
2951 * that indicates which rings it services.
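 * As a worked example (assuming 2 vectors and 8 TX completion rings):
 * vector 0's mask covers the cq_id of RSS ring 0 plus the cq_ids of the
 * TX completion rings at indices rss_ring_count + 0..3, and vector 1
 * covers RSS ring 1 plus the rings at indices rss_ring_count + 4..7.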
2952 */
2953static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2954{
2955 int j, vect = ctx->intr;
2956 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2957
2958 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2959 /* Add the RSS ring serviced by this vector
2960 * to the mask.
2961 */
2962 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2963 /* Add the TX ring(s) serviced by this vector
2964 * to the mask. */
2965 for (j = 0; j < tx_rings_per_vector; j++) {
2966 ctx->irq_mask |=
2967 (1 << qdev->rx_ring[qdev->rss_ring_count +
2968 (vect * tx_rings_per_vector) + j].cq_id);
2969 }
2970 } else {
2971 /* For single vector we just shift each queue's
2972 * ID into the mask.
2973 */
2974 for (j = 0; j < qdev->rx_ring_count; j++)
2975 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2976 }
2977}
2978
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002979/*
2980 * Here we build the intr_context structures based on
2981 * our rx_ring count and intr vector count.
2982 * The intr_context structure is used to hook each vector
2983 * to possibly different handlers.
2984 */
2985static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2986{
2987 int i = 0;
2988 struct intr_context *intr_context = &qdev->intr_context[0];
2989
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002990 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2991		/* Each rx_ring has its
2992 * own intr_context since we have separate
2993 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002994 */
2995 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2996 qdev->rx_ring[i].irq = i;
2997 intr_context->intr = i;
2998 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002999 /* Set up this vector's bit-mask that indicates
3000 * which queues it services.
3001 */
3002 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003003 /*
3004 * We set up each vectors enable/disable/read bits so
3005 * there's no bit/mask calculations in the critical path.
3006 */
3007 intr_context->intr_en_mask =
3008 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3009 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3010 | i;
3011 intr_context->intr_dis_mask =
3012 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3013 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3014 INTR_EN_IHD | i;
3015 intr_context->intr_read_mask =
3016 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3017 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3018 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003019 if (i == 0) {
3020 /* The first vector/queue handles
3021 * broadcast/multicast, fatal errors,
3022 * and firmware events. This in addition
3023 * to normal inbound NAPI processing.
3024 */
3025 intr_context->handler = qlge_isr;
3026 sprintf(intr_context->name, "%s-rx-%d",
3027 qdev->ndev->name, i);
3028 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003029 /*
3030 * Inbound queues handle unicast frames only.
3031 */
3032 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003033 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003034 qdev->ndev->name, i);
3035 }
3036 }
3037 } else {
3038 /*
3039 * All rx_rings use the same intr_context since
3040 * there is only one vector.
3041 */
3042 intr_context->intr = 0;
3043 intr_context->qdev = qdev;
3044 /*
3045 * We set up each vectors enable/disable/read bits so
3046 * there's no bit/mask calculations in the critical path.
3047 */
3048 intr_context->intr_en_mask =
3049 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3050 intr_context->intr_dis_mask =
3051 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3052 INTR_EN_TYPE_DISABLE;
3053 intr_context->intr_read_mask =
3054 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3055 /*
3056 * Single interrupt means one handler for all rings.
3057 */
3058 intr_context->handler = qlge_isr;
3059 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003060 /* Set up this vector's bit-mask that indicates
3061 * which queues it services. In this case there is
3062 * a single vector so it will service all RSS and
3063 * TX completion rings.
3064 */
3065 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003066 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003067 /* Tell the TX completion rings which MSIx vector
3068 * they will be using.
3069 */
3070 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003071}
3072
3073static void ql_free_irq(struct ql_adapter *qdev)
3074{
3075 int i;
3076 struct intr_context *intr_context = &qdev->intr_context[0];
3077
3078 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3079 if (intr_context->hooked) {
3080 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3081 free_irq(qdev->msi_x_entry[i].vector,
3082 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00003083 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003084 "freeing msix interrupt %d.\n", i);
3085 } else {
3086 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00003087 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003088 "freeing msi interrupt %d.\n", i);
3089 }
3090 }
3091 }
3092 ql_disable_msix(qdev);
3093}
3094
3095static int ql_request_irq(struct ql_adapter *qdev)
3096{
3097 int i;
3098 int status = 0;
3099 struct pci_dev *pdev = qdev->pdev;
3100 struct intr_context *intr_context = &qdev->intr_context[0];
3101
3102 ql_resolve_queues_to_irqs(qdev);
3103
3104 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3105 atomic_set(&intr_context->irq_cnt, 0);
3106 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3107 status = request_irq(qdev->msi_x_entry[i].vector,
3108 intr_context->handler,
3109 0,
3110 intr_context->name,
3111 &qdev->rx_ring[i]);
3112 if (status) {
3113 QPRINTK(qdev, IFUP, ERR,
3114 "Failed request for MSIX interrupt %d.\n",
3115 i);
3116 goto err_irq;
3117 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003118 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003119 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3120 i,
3121 qdev->rx_ring[i].type ==
3122 DEFAULT_Q ? "DEFAULT_Q" : "",
3123 qdev->rx_ring[i].type ==
3124 TX_Q ? "TX_Q" : "",
3125 qdev->rx_ring[i].type ==
3126 RX_Q ? "RX_Q" : "", intr_context->name);
3127 }
3128 } else {
3129 QPRINTK(qdev, IFUP, DEBUG,
3130 "trying msi or legacy interrupts.\n");
3131 QPRINTK(qdev, IFUP, DEBUG,
3132 "%s: irq = %d.\n", __func__, pdev->irq);
3133 QPRINTK(qdev, IFUP, DEBUG,
3134 "%s: context->name = %s.\n", __func__,
3135 intr_context->name);
3136 QPRINTK(qdev, IFUP, DEBUG,
3137 "%s: dev_id = 0x%p.\n", __func__,
3138 &qdev->rx_ring[0]);
3139 status =
3140 request_irq(pdev->irq, qlge_isr,
3141 test_bit(QL_MSI_ENABLED,
3142 &qdev->
3143 flags) ? 0 : IRQF_SHARED,
3144 intr_context->name, &qdev->rx_ring[0]);
3145 if (status)
3146 goto err_irq;
3147
3148 QPRINTK(qdev, IFUP, ERR,
3149 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3150 i,
3151 qdev->rx_ring[0].type ==
3152 DEFAULT_Q ? "DEFAULT_Q" : "",
3153 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3154 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3155 intr_context->name);
3156 }
3157 intr_context->hooked = 1;
3158 }
3159 return status;
3160err_irq:
3161	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3162 ql_free_irq(qdev);
3163 return status;
3164}
3165
3166static int ql_start_rss(struct ql_adapter *qdev)
3167{
Ron Mercer541ae282009-10-08 09:54:37 +00003168 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3169 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3170 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3171 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3172 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3173 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003174 struct ricb *ricb = &qdev->ricb;
3175 int status = 0;
3176 int i;
3177 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3178
Ron Mercere3324712009-07-02 06:06:13 +00003179 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003180
Ron Mercerb2014ff2009-08-27 11:02:09 +00003181 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003182 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003183 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3184 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003185
3186 /*
3187 * Fill out the Indirection Table.
3188 */
Ron Mercer541ae282009-10-08 09:54:37 +00003189 for (i = 0; i < 1024; i++)
3190 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003191
Ron Mercer541ae282009-10-08 09:54:37 +00003192 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3193 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003194
Ron Mercer49740972009-02-26 10:08:36 +00003195 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003196
Ron Mercere3324712009-07-02 06:06:13 +00003197 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198 if (status) {
3199 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3200 return status;
3201 }
Ron Mercer49740972009-02-26 10:08:36 +00003202 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003203 return status;
3204}
3205
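/* Clear all 16 frame-routing slots while holding the routing-index
 * semaphore.
 */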
Ron Mercera5f59dc2009-07-02 06:06:07 +00003206static int ql_clear_routing_entries(struct ql_adapter *qdev)
3207{
3208 int i, status = 0;
3209
3210 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3211 if (status)
3212 return status;
3213 /* Clear all the entries in the routing table. */
3214 for (i = 0; i < 16; i++) {
3215 status = ql_set_routing_reg(qdev, i, 0, 0);
3216 if (status) {
3217 QPRINTK(qdev, IFUP, ERR,
3218 "Failed to init routing register for CAM "
3219 "packets.\n");
3220 break;
3221 }
3222 }
3223 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3224 return status;
3225}
3226
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003227/* Initialize the frame-to-queue routing. */
3228static int ql_route_initialize(struct ql_adapter *qdev)
3229{
3230 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003231
3232 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003233 status = ql_clear_routing_entries(qdev);
3234 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003235 return status;
3236
3237 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3238 if (status)
3239 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003240
3241 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3242 if (status) {
3243 QPRINTK(qdev, IFUP, ERR,
3244 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003245 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003246 }
3247 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3248 if (status) {
3249 QPRINTK(qdev, IFUP, ERR,
3250 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003251 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003252 }
3253 /* If we have more than one inbound queue, then turn on RSS in the
3254 * routing block.
3255 */
3256 if (qdev->rss_ring_count > 1) {
3257 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3258 RT_IDX_RSS_MATCH, 1);
3259 if (status) {
3260 QPRINTK(qdev, IFUP, ERR,
3261 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003262 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003263 }
3264 }
3265
3266 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3267 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003268 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003269 QPRINTK(qdev, IFUP, ERR,
3270 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003271exit:
3272 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003273 return status;
3274}
3275
Ron Mercer2ee1e272009-03-03 12:10:33 +00003276int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003277{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003278 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003279
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003280	/* Check if the link is up and use that to
3281 * determine if we are setting or clearing
3282 * the MAC address in the CAM.
3283 */
3284 set = ql_read32(qdev, STS);
3285 set &= qdev->port_link_up;
3286 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003287 if (status) {
3288 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3289 return status;
3290 }
3291
3292 status = ql_route_initialize(qdev);
3293 if (status)
3294 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3295
3296 return status;
3297}
3298
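/* Bring up the chip: program the system, receive and function control
 * registers, start every rx and tx ring, download the RSS RICB when more
 * than one inbound queue exists, initialize the port, load the CAM and
 * routing tables, and finally enable NAPI on the RSS rings.
 */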
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003299static int ql_adapter_initialize(struct ql_adapter *qdev)
3300{
3301 u32 value, mask;
3302 int i;
3303 int status = 0;
3304
3305 /*
3306 * Set up the System register to halt on errors.
3307 */
3308 value = SYS_EFE | SYS_FAE;
3309 mask = value << 16;
3310 ql_write32(qdev, SYS, mask | value);
3311
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003312	/* Set the default queue and VLAN behavior. */
3313 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3314 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003315 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3316
3317 /* Set the MPI interrupt to enabled. */
3318 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3319
3320 /* Enable the function, set pagesize, enable error checking. */
3321 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3322 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3323
3324 /* Set/clear header splitting. */
3325 mask = FSC_VM_PAGESIZE_MASK |
3326 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3327 ql_write32(qdev, FSC, mask | value);
3328
3329 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
Ron Mercer52e55f32009-10-10 09:35:07 +00003330 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003331
Ron Mercera3b71932009-10-08 09:54:38 +00003332	/* Set RX packet routing to use the port/pci function on which the
3333	 * packet arrived, in addition to the usual frame routing.
3334 * This is helpful on bonding where both interfaces can have
3335 * the same MAC address.
3336 */
3337 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003338 /* Reroute all packets to our Interface.
3339 * They may have been routed to MPI firmware
3340 * due to WOL.
3341 */
3342 value = ql_read32(qdev, MGMT_RCV_CFG);
3343 value &= ~MGMT_RCV_CFG_RM;
3344 mask = 0xffff0000;
3345
3346 /* Sticky reg needs clearing due to WOL. */
3347 ql_write32(qdev, MGMT_RCV_CFG, mask);
3348 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3349
3350	/* Default WOL is enabled on Mezz cards */
3351 if (qdev->pdev->subsystem_device == 0x0068 ||
3352 qdev->pdev->subsystem_device == 0x0180)
3353 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003354
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003355 /* Start up the rx queues. */
3356 for (i = 0; i < qdev->rx_ring_count; i++) {
3357 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3358 if (status) {
3359 QPRINTK(qdev, IFUP, ERR,
3360 "Failed to start rx ring[%d].\n", i);
3361 return status;
3362 }
3363 }
3364
3365 /* If there is more than one inbound completion queue
3366 * then download a RICB to configure RSS.
3367 */
3368 if (qdev->rss_ring_count > 1) {
3369 status = ql_start_rss(qdev);
3370 if (status) {
3371 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3372 return status;
3373 }
3374 }
3375
3376 /* Start up the tx queues. */
3377 for (i = 0; i < qdev->tx_ring_count; i++) {
3378 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3379 if (status) {
3380 QPRINTK(qdev, IFUP, ERR,
3381 "Failed to start tx ring[%d].\n", i);
3382 return status;
3383 }
3384 }
3385
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003386 /* Initialize the port and set the max framesize. */
3387 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003388 if (status)
3389 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003390
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003391 /* Set up the MAC address and frame routing filter. */
3392 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003393 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003394 QPRINTK(qdev, IFUP, ERR,
3395 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003396 return status;
3397 }
3398
3399 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003400 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003401 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003402 i);
3403 napi_enable(&qdev->rx_ring[i].napi);
3404 }
3405
3406 return status;
3407}
3408
3409/* Issue soft reset to chip. */
3410static int ql_adapter_reset(struct ql_adapter *qdev)
3411{
3412 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003413 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003414 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003415
Ron Mercera5f59dc2009-07-02 06:06:07 +00003416 /* Clear all the entries in the routing table. */
3417 status = ql_clear_routing_entries(qdev);
3418 if (status) {
3419 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3420 return status;
3421 }
3422
3423 end_jiffies = jiffies +
3424 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003425
3426 /* Stop management traffic. */
3427 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3428
3429 /* Wait for the NIC and MGMNT FIFOs to empty. */
3430 ql_wait_fifo_empty(qdev);
3431
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003432 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003433
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003434 do {
3435 value = ql_read32(qdev, RST_FO);
3436 if ((value & RST_FO_FR) == 0)
3437 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003438 cpu_relax();
3439 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003440
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003441 if (value & RST_FO_FR) {
3442 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003443 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003444 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003445 }
3446
Ron Mercer84087f42009-10-08 09:54:41 +00003447 /* Resume management traffic. */
3448 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003449 return status;
3450}
3451
3452static void ql_display_dev_info(struct net_device *ndev)
3453{
3454 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3455
3456 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003457 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003458 "XG Roll = %d, XG Rev = %d.\n",
3459 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003460 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003461 qdev->chip_rev_id & 0x0000000f,
3462 qdev->chip_rev_id >> 4 & 0x0000000f,
3463 qdev->chip_rev_id >> 8 & 0x0000000f,
3464 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003465 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003466}
3467
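/* Program Wake-on-LAN through the MPI mailbox.  Only magic-packet wake is
 * supported; when enabled, all receive traffic is rerouted to the
 * management interface before the WOL mode is set.
 */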
Ron Mercerbc083ce2009-10-21 11:07:40 +00003468int ql_wol(struct ql_adapter *qdev)
3469{
3470 int status = 0;
3471 u32 wol = MB_WOL_DISABLE;
3472
3473 /* The CAM is still intact after a reset, but if we
3474 * are doing WOL, then we may need to program the
3475 * routing regs. We would also need to issue the mailbox
3476 * commands to instruct the MPI what to do per the ethtool
3477 * settings.
3478 */
3479
3480 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3481 WAKE_MCAST | WAKE_BCAST)) {
3482 QPRINTK(qdev, IFDOWN, ERR,
3483			"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3484 qdev->wol);
3485 return -EINVAL;
3486 }
3487
3488 if (qdev->wol & WAKE_MAGIC) {
3489 status = ql_mb_wol_set_magic(qdev, 1);
3490 if (status) {
3491 QPRINTK(qdev, IFDOWN, ERR,
3492 "Failed to set magic packet on %s.\n",
3493 qdev->ndev->name);
3494 return status;
3495 } else
3496 QPRINTK(qdev, DRV, INFO,
3497 "Enabled magic packet successfully on %s.\n",
3498 qdev->ndev->name);
3499
3500 wol |= MB_WOL_MAGIC_PKT;
3501 }
3502
3503 if (qdev->wol) {
3504 /* Reroute all packets to Management Interface */
3505 ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
3506 (MGMT_RCV_CFG_RM << 16)));
3507 wol |= MB_WOL_MODE_ON;
3508 status = ql_mb_wol_mode(qdev, wol);
3509 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3510			(status == 0) ? "Successfully set" : "Failed", wol,
3511 qdev->ndev->name);
3512 }
3513
3514 return status;
3515}
3516
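/* Tear down the adapter: take the link down, cancel the worker threads,
 * quiesce NAPI, disable interrupts, clean the tx rings, free the rx
 * buffers and finally soft-reset the chip.
 */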
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003517static int ql_adapter_down(struct ql_adapter *qdev)
3518{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003519 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003520
Ron Mercer6a473302009-07-02 06:06:12 +00003521 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003522
Ron Mercer6497b602009-02-12 16:37:13 -08003523 /* Don't kill the reset worker thread if we
3524 * are in the process of recovery.
3525 */
3526 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3527 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003528 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3529 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003530 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003531 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003532
Ron Mercer39aa8162009-08-27 11:02:11 +00003533 for (i = 0; i < qdev->rss_ring_count; i++)
3534 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003535
3536 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3537
3538 ql_disable_interrupts(qdev);
3539
3540 ql_tx_ring_clean(qdev);
3541
Ron Mercer6b318cb2009-03-09 10:59:26 +00003542 /* Call netif_napi_del() from common point.
3543 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003544 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003545 netif_napi_del(&qdev->rx_ring[i].napi);
3546
Ron Mercer4545a3f2009-02-23 10:42:17 +00003547 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003548
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549 status = ql_adapter_reset(qdev);
3550 if (status)
3551 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3552 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003553 return status;
3554}
3555
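/* Bring the adapter up: initialize the hardware, repost rx buffers,
 * restore the carrier if the port reports link, then enable interrupts
 * and the tx queues.
 */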
3556static int ql_adapter_up(struct ql_adapter *qdev)
3557{
3558 int err = 0;
3559
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003560 err = ql_adapter_initialize(qdev);
3561 if (err) {
3562 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 goto err_init;
3564 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003566 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003567 /* If the port is initialized and the
3568	 * link is up then turn on the carrier.
3569 */
3570 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3571 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003572 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003573 ql_enable_interrupts(qdev);
3574 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003575 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003576
3577 return 0;
3578err_init:
3579 ql_adapter_reset(qdev);
3580 return err;
3581}
3582
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003583static void ql_release_adapter_resources(struct ql_adapter *qdev)
3584{
3585 ql_free_mem_resources(qdev);
3586 ql_free_irq(qdev);
3587}
3588
3589static int ql_get_adapter_resources(struct ql_adapter *qdev)
3590{
3591 int status = 0;
3592
3593 if (ql_alloc_mem_resources(qdev)) {
3594 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3595 return -ENOMEM;
3596 }
3597 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003598 return status;
3599}
3600
3601static int qlge_close(struct net_device *ndev)
3602{
3603 struct ql_adapter *qdev = netdev_priv(ndev);
3604
3605 /*
3606 * Wait for device to recover from a reset.
3607 * (Rarely happens, but possible.)
3608 */
3609 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3610 msleep(1);
3611 ql_adapter_down(qdev);
3612 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003613 return 0;
3614}
3615
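/* Lay out the ring topology.  One RSS (inbound) ring is created per MSI-X
 * vector actually granted (one per online CPU is requested), plus one tx
 * ring and its outbound completion queue per CPU; the outbound completion
 * queues are numbered immediately after the RSS rings.
 */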
3616static int ql_configure_rings(struct ql_adapter *qdev)
3617{
3618 int i;
3619 struct rx_ring *rx_ring;
3620 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003621 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003622 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3623 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3624
3625 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003626
Ron Mercera4ab6132009-08-27 11:02:10 +00003627 /* In a perfect world we have one RSS ring for each CPU
3628	 * and each has its own vector. To do that we ask for
3629 * cpu_cnt vectors. ql_enable_msix() will adjust the
3630 * vector count to what we actually get. We then
3631 * allocate an RSS ring for each.
3632 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003633 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003634 qdev->intr_count = cpu_cnt;
3635 ql_enable_msix(qdev);
3636 /* Adjust the RSS ring count to the actual vector count. */
3637 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003638 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003639 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003640
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003641 for (i = 0; i < qdev->tx_ring_count; i++) {
3642 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003643 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003644 tx_ring->qdev = qdev;
3645 tx_ring->wq_id = i;
3646 tx_ring->wq_len = qdev->tx_ring_size;
3647 tx_ring->wq_size =
3648 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3649
3650 /*
3651		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003652 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003653 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003654 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003655 }
3656
3657 for (i = 0; i < qdev->rx_ring_count; i++) {
3658 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003659 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003660 rx_ring->qdev = qdev;
3661 rx_ring->cq_id = i;
3662 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003663 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003664 /*
3665 * Inbound (RSS) queues.
3666 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003667 rx_ring->cq_len = qdev->rx_ring_size;
3668 rx_ring->cq_size =
3669 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3670 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3671 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003672 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00003673 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3674 QPRINTK(qdev, IFUP, DEBUG,
3675 "lbq_buf_size %d, order = %d\n",
3676 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003677 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3678 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003679 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00003680 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003681 rx_ring->type = RX_Q;
3682 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003683 /*
3684 * Outbound queue handles outbound completions only.
3685 */
3686 /* outbound cq is same size as tx_ring it services. */
3687 rx_ring->cq_len = qdev->tx_ring_size;
3688 rx_ring->cq_size =
3689 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3690 rx_ring->lbq_len = 0;
3691 rx_ring->lbq_size = 0;
3692 rx_ring->lbq_buf_size = 0;
3693 rx_ring->sbq_len = 0;
3694 rx_ring->sbq_size = 0;
3695 rx_ring->sbq_buf_size = 0;
3696 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003697 }
3698 }
3699 return 0;
3700}
3701
3702static int qlge_open(struct net_device *ndev)
3703{
3704 int err = 0;
3705 struct ql_adapter *qdev = netdev_priv(ndev);
3706
3707 err = ql_configure_rings(qdev);
3708 if (err)
3709 return err;
3710
3711 err = ql_get_adapter_resources(qdev);
3712 if (err)
3713 goto error_up;
3714
3715 err = ql_adapter_up(qdev);
3716 if (err)
3717 goto error_up;
3718
3719 return err;
3720
3721error_up:
3722 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003723 return err;
3724}
3725
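/* Resize the large receive buffers for a new MTU by cycling the adapter
 * down and back up so the rings are repopulated at the new buffer size.
 */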
Ron Mercer7c734352009-10-19 03:32:19 +00003726static int ql_change_rx_buffers(struct ql_adapter *qdev)
3727{
3728 struct rx_ring *rx_ring;
3729 int i, status;
3730 u32 lbq_buf_len;
3731
3732	/* Wait for an outstanding reset to complete. */
3733 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3734 int i = 3;
3735 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3736 QPRINTK(qdev, IFUP, ERR,
3737 "Waiting for adapter UP...\n");
3738 ssleep(1);
3739 }
3740
3741 if (!i) {
3742 QPRINTK(qdev, IFUP, ERR,
3743 "Timed out waiting for adapter UP\n");
3744 return -ETIMEDOUT;
3745 }
3746 }
3747
3748 status = ql_adapter_down(qdev);
3749 if (status)
3750 goto error;
3751
3752 /* Get the new rx buffer size. */
3753 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3754 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3755 qdev->lbq_buf_order = get_order(lbq_buf_len);
3756
3757 for (i = 0; i < qdev->rss_ring_count; i++) {
3758 rx_ring = &qdev->rx_ring[i];
3759 /* Set the new size. */
3760 rx_ring->lbq_buf_size = lbq_buf_len;
3761 }
3762
3763 status = ql_adapter_up(qdev);
3764 if (status)
3765 goto error;
3766
3767 return status;
3768error:
3769 QPRINTK(qdev, IFUP, ALERT,
3770 "Driver up/down cycle failed, closing device.\n");
3771 set_bit(QL_ADAPTER_UP, &qdev->flags);
3772 dev_close(qdev->ndev);
3773 return status;
3774}
3775
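/* Only the standard (1500) and jumbo (9000) MTUs are supported; switching
 * between them triggers an rx buffer resize when the interface is running.
 */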
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003776static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3777{
3778 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00003779 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003780
3781 if (ndev->mtu == 1500 && new_mtu == 9000) {
3782 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3783 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3784 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3785 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3786 (ndev->mtu == 9000 && new_mtu == 9000)) {
3787 return 0;
3788 } else
3789 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00003790
3791 queue_delayed_work(qdev->workqueue,
3792 &qdev->mpi_port_cfg_work, 3*HZ);
3793
3794 if (!netif_running(qdev->ndev)) {
3795 ndev->mtu = new_mtu;
3796 return 0;
3797 }
3798
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003799 ndev->mtu = new_mtu;
Ron Mercer7c734352009-10-19 03:32:19 +00003800 status = ql_change_rx_buffers(qdev);
3801 if (status) {
3802 QPRINTK(qdev, IFUP, ERR,
3803 "Changing MTU failed.\n");
3804 }
3805
3806 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003807}
3808
3809static struct net_device_stats *qlge_get_stats(struct net_device
3810 *ndev)
3811{
Ajit Khapardebcc90f52009-10-07 02:46:09 +00003812 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813}
3814
3815static void qlge_set_multicast_list(struct net_device *ndev)
3816{
3817 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3818 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003819 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003820
Ron Mercercc288f52009-02-23 10:42:14 +00003821 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3822 if (status)
3823 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003824 /*
3825 * Set or clear promiscuous mode if a
3826 * transition is taking place.
3827 */
3828 if (ndev->flags & IFF_PROMISC) {
3829 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3830 if (ql_set_routing_reg
3831 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3832 QPRINTK(qdev, HW, ERR,
3833					"Failed to set promiscuous mode.\n");
3834 } else {
3835 set_bit(QL_PROMISCUOUS, &qdev->flags);
3836 }
3837 }
3838 } else {
3839 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3840 if (ql_set_routing_reg
3841 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3842 QPRINTK(qdev, HW, ERR,
3843					"Failed to clear promiscuous mode.\n");
3844 } else {
3845 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3846 }
3847 }
3848 }
3849
3850 /*
3851 * Set or clear all multicast mode if a
3852 * transition is taking place.
3853 */
3854 if ((ndev->flags & IFF_ALLMULTI) ||
3855 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3856 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3857 if (ql_set_routing_reg
3858 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3859 QPRINTK(qdev, HW, ERR,
3860 "Failed to set all-multi mode.\n");
3861 } else {
3862 set_bit(QL_ALLMULTI, &qdev->flags);
3863 }
3864 }
3865 } else {
3866 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3867 if (ql_set_routing_reg
3868 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3869 QPRINTK(qdev, HW, ERR,
3870 "Failed to clear all-multi mode.\n");
3871 } else {
3872 clear_bit(QL_ALLMULTI, &qdev->flags);
3873 }
3874 }
3875 }
3876
3877 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003878 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3879 if (status)
3880 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003881 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3882 i++, mc_ptr = mc_ptr->next)
3883 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3884 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3885 QPRINTK(qdev, HW, ERR,
3886					"Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003887 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003888 goto exit;
3889 }
Ron Mercercc288f52009-02-23 10:42:14 +00003890 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003891 if (ql_set_routing_reg
3892 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3893 QPRINTK(qdev, HW, ERR,
3894 "Failed to set multicast match mode.\n");
3895 } else {
3896 set_bit(QL_ALLMULTI, &qdev->flags);
3897 }
3898 }
3899exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00003900 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003901}
3902
3903static int qlge_set_mac_address(struct net_device *ndev, void *p)
3904{
3905 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3906 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003907 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003908
3909 if (netif_running(ndev))
3910 return -EBUSY;
3911
3912 if (!is_valid_ether_addr(addr->sa_data))
3913 return -EADDRNOTAVAIL;
3914 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3915
Ron Mercercc288f52009-02-23 10:42:14 +00003916 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3917 if (status)
3918 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00003919 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3920 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00003921 if (status)
3922 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3923 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3924 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003925}
3926
3927static void qlge_tx_timeout(struct net_device *ndev)
3928{
3929 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08003930 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003931}
3932
3933static void ql_asic_reset_work(struct work_struct *work)
3934{
3935 struct ql_adapter *qdev =
3936 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00003937 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003938 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00003939 status = ql_adapter_down(qdev);
3940 if (status)
3941 goto error;
3942
3943 status = ql_adapter_up(qdev);
3944 if (status)
3945 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00003946
3947 /* Restore rx mode. */
3948 clear_bit(QL_ALLMULTI, &qdev->flags);
3949 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3950 qlge_set_multicast_list(qdev->ndev);
3951
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003952 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00003953 return;
3954error:
3955 QPRINTK(qdev, IFUP, ALERT,
3956 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003957
Ron Mercerdb988122009-03-09 10:59:17 +00003958 set_bit(QL_ADAPTER_UP, &qdev->flags);
3959 dev_close(qdev->ndev);
3960 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003961}
3962
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003963static struct nic_operations qla8012_nic_ops = {
3964 .get_flash = ql_get_8012_flash_params,
3965 .port_initialize = ql_8012_port_initialize,
3966};
3967
Ron Mercercdca8d02009-03-02 08:07:31 +00003968static struct nic_operations qla8000_nic_ops = {
3969 .get_flash = ql_get_8000_flash_params,
3970 .port_initialize = ql_8000_port_initialize,
3971};
3972
Ron Mercere4552f52009-06-09 05:39:32 +00003973/* Find the pcie function number for the other NIC
3974 * on this chip. Since both NIC functions share a
3975 * common firmware we have the lowest enabled function
3976 * do any common work. Examples would be resetting
3977 * after a fatal firmware error, or doing a firmware
3978 * coredump.
3979 */
3980static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003981{
Ron Mercere4552f52009-06-09 05:39:32 +00003982 int status = 0;
3983 u32 temp;
3984 u32 nic_func1, nic_func2;
3985
3986 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3987 &temp);
3988 if (status)
3989 return status;
3990
3991 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3992 MPI_TEST_NIC_FUNC_MASK);
3993 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3994 MPI_TEST_NIC_FUNC_MASK);
3995
3996 if (qdev->func == nic_func1)
3997 qdev->alt_func = nic_func2;
3998 else if (qdev->func == nic_func2)
3999 qdev->alt_func = nic_func1;
4000 else
4001 status = -EIO;
4002
4003 return status;
4004}
4005
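/* Read the PCIe function number from the status register and derive the
 * port index, semaphore mask, link/init status bits and mailbox addresses
 * used by this function, then select the 8012 or 8000 nic_ops.
 */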
4006static int ql_get_board_info(struct ql_adapter *qdev)
4007{
4008 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004009 qdev->func =
4010 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004011 if (qdev->func > 3)
4012 return -EIO;
4013
4014 status = ql_get_alt_pcie_func(qdev);
4015 if (status)
4016 return status;
4017
4018 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4019 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004020 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4021 qdev->port_link_up = STS_PL1;
4022 qdev->port_init = STS_PI1;
4023 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4024 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4025 } else {
4026 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4027 qdev->port_link_up = STS_PL0;
4028 qdev->port_init = STS_PI0;
4029 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4030 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4031 }
4032 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004033 qdev->device_id = qdev->pdev->device;
4034 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4035 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004036 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4037 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004038 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004039}
4040
4041static void ql_release_all(struct pci_dev *pdev)
4042{
4043 struct net_device *ndev = pci_get_drvdata(pdev);
4044 struct ql_adapter *qdev = netdev_priv(ndev);
4045
4046 if (qdev->workqueue) {
4047 destroy_workqueue(qdev->workqueue);
4048 qdev->workqueue = NULL;
4049 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004050
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004051 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004052 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004053 if (qdev->doorbell_area)
4054 iounmap(qdev->doorbell_area);
4055 pci_release_regions(pdev);
4056 pci_set_drvdata(pdev, NULL);
4057}
4058
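/* Per-device PCI setup: enable the device, set the PCIe read request size,
 * map the register and doorbell BARs, select a 64- or 32-bit DMA mask,
 * read the flash, and initialize the delayed-work handlers.
 */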
4059static int __devinit ql_init_device(struct pci_dev *pdev,
4060 struct net_device *ndev, int cards_found)
4061{
4062 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004063 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004064
Ron Mercere3324712009-07-02 06:06:13 +00004065 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004066 err = pci_enable_device(pdev);
4067 if (err) {
4068 dev_err(&pdev->dev, "PCI device enable failed.\n");
4069 return err;
4070 }
4071
Ron Mercerebd6e772009-09-29 08:39:25 +00004072 qdev->ndev = ndev;
4073 qdev->pdev = pdev;
4074 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004075
Ron Mercerbc9167f2009-10-10 09:35:04 +00004076 /* Set PCIe read request size */
4077 err = pcie_set_readrq(pdev, 4096);
4078 if (err) {
4079 dev_err(&pdev->dev, "Set readrq failed.\n");
4080 goto err_out;
4081 }
4082
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004083 err = pci_request_regions(pdev, DRV_NAME);
4084 if (err) {
4085 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004086 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004087 }
4088
4089 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004090 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004091 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004092 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004093 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004094 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004095 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004096 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004097 }
4098
4099 if (err) {
4100 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4101 goto err_out;
4102 }
4103
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004104 qdev->reg_base =
4105 ioremap_nocache(pci_resource_start(pdev, 1),
4106 pci_resource_len(pdev, 1));
4107 if (!qdev->reg_base) {
4108 dev_err(&pdev->dev, "Register mapping failed.\n");
4109 err = -ENOMEM;
4110 goto err_out;
4111 }
4112
4113 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4114 qdev->doorbell_area =
4115 ioremap_nocache(pci_resource_start(pdev, 3),
4116 pci_resource_len(pdev, 3));
4117 if (!qdev->doorbell_area) {
4118 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4119 err = -ENOMEM;
4120 goto err_out;
4121 }
4122
Ron Mercere4552f52009-06-09 05:39:32 +00004123 err = ql_get_board_info(qdev);
4124 if (err) {
4125 dev_err(&pdev->dev, "Register access failed.\n");
4126 err = -EIO;
4127 goto err_out;
4128 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129 qdev->msg_enable = netif_msg_init(debug, default_msg);
4130 spin_lock_init(&qdev->hw_lock);
4131 spin_lock_init(&qdev->stats_lock);
4132
4133 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004134 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004135 if (err) {
4136 dev_err(&pdev->dev, "Invalid FLASH.\n");
4137 goto err_out;
4138 }
4139
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004140 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4141
4142 /* Set up the default ring sizes. */
4143 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4144 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4145
4146 /* Set up the coalescing parameters. */
4147 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4148 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4149 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4150 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4151
4152 /*
4153 * Set up the operating parameters.
4154 */
4155 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004156 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4157 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4158 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4159 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004160 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004161 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004162 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004163
4164 if (!cards_found) {
4165 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4166 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4167 DRV_NAME, DRV_VERSION);
4168 }
4169 return 0;
4170err_out:
4171 ql_release_all(pdev);
4172 pci_disable_device(pdev);
4173 return err;
4174}
4175
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004176
4177static const struct net_device_ops qlge_netdev_ops = {
4178 .ndo_open = qlge_open,
4179 .ndo_stop = qlge_close,
4180 .ndo_start_xmit = qlge_send,
4181 .ndo_change_mtu = qlge_change_mtu,
4182 .ndo_get_stats = qlge_get_stats,
4183 .ndo_set_multicast_list = qlge_set_multicast_list,
4184 .ndo_set_mac_address = qlge_set_mac_address,
4185 .ndo_validate_addr = eth_validate_addr,
4186 .ndo_tx_timeout = qlge_tx_timeout,
4187 .ndo_vlan_rx_register = ql_vlan_rx_register,
4188 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
4189 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
4190};
4191
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004192static int __devinit qlge_probe(struct pci_dev *pdev,
4193 const struct pci_device_id *pci_entry)
4194{
4195 struct net_device *ndev = NULL;
4196 struct ql_adapter *qdev = NULL;
4197 static int cards_found = 0;
4198 int err = 0;
4199
Ron Mercer1e213302009-03-09 10:59:21 +00004200 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4201 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004202 if (!ndev)
4203 return -ENOMEM;
4204
4205 err = ql_init_device(pdev, ndev, cards_found);
4206 if (err < 0) {
4207 free_netdev(ndev);
4208 return err;
4209 }
4210
4211 qdev = netdev_priv(ndev);
4212 SET_NETDEV_DEV(ndev, &pdev->dev);
4213 ndev->features = (0
4214 | NETIF_F_IP_CSUM
4215 | NETIF_F_SG
4216 | NETIF_F_TSO
4217 | NETIF_F_TSO6
4218 | NETIF_F_TSO_ECN
4219 | NETIF_F_HW_VLAN_TX
4220 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004221 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004222
4223 if (test_bit(QL_DMA64, &qdev->flags))
4224 ndev->features |= NETIF_F_HIGHDMA;
4225
4226 /*
4227 * Set up net_device structure.
4228 */
4229 ndev->tx_queue_len = qdev->tx_ring_size;
4230 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004231
4232 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004233 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004234 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004235
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004236 err = register_netdev(ndev);
4237 if (err) {
4238 dev_err(&pdev->dev, "net device registration failed.\n");
4239 ql_release_all(pdev);
4240 pci_disable_device(pdev);
4241 return err;
4242 }
Ron Mercer6a473302009-07-02 06:06:12 +00004243 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004244 ql_display_dev_info(ndev);
4245 cards_found++;
4246 return 0;
4247}
4248
4249static void __devexit qlge_remove(struct pci_dev *pdev)
4250{
4251 struct net_device *ndev = pci_get_drvdata(pdev);
4252 unregister_netdev(ndev);
4253 ql_release_all(pdev);
4254 pci_disable_device(pdev);
4255 free_netdev(ndev);
4256}
4257
4258/*
4259 * This callback is called by the PCI subsystem whenever
4260 * a PCI bus error is detected.
4261 */
4262static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4263 enum pci_channel_state state)
4264{
4265 struct net_device *ndev = pci_get_drvdata(pdev);
4266 struct ql_adapter *qdev = netdev_priv(ndev);
4267
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004268 netif_device_detach(ndev);
4269
4270 if (state == pci_channel_io_perm_failure)
4271 return PCI_ERS_RESULT_DISCONNECT;
4272
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004273 if (netif_running(ndev))
4274 ql_adapter_down(qdev);
4275
4276 pci_disable_device(pdev);
4277
4278 /* Request a slot reset. */
4279 return PCI_ERS_RESULT_NEED_RESET;
4280}
4281
4282/*
4283 * This callback is called after the PCI bus has been reset.
4284 * Basically, this tries to restart the card from scratch.
4285 * This is a shortened version of the device probe/discovery code;
4286 * it resembles the first half of the () routine.
4287 */
4288static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4289{
4290 struct net_device *ndev = pci_get_drvdata(pdev);
4291 struct ql_adapter *qdev = netdev_priv(ndev);
4292
4293 if (pci_enable_device(pdev)) {
4294 QPRINTK(qdev, IFUP, ERR,
4295 "Cannot re-enable PCI device after reset.\n");
4296 return PCI_ERS_RESULT_DISCONNECT;
4297 }
4298
4299 pci_set_master(pdev);
4300
4301 netif_carrier_off(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004302 ql_adapter_reset(qdev);
4303
4304 /* Make sure the EEPROM is good */
4305 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4306
4307 if (!is_valid_ether_addr(ndev->perm_addr)) {
4308 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4309 return PCI_ERS_RESULT_DISCONNECT;
4310 }
4311
4312 return PCI_ERS_RESULT_RECOVERED;
4313}
4314
4315static void qlge_io_resume(struct pci_dev *pdev)
4316{
4317 struct net_device *ndev = pci_get_drvdata(pdev);
4318 struct ql_adapter *qdev = netdev_priv(ndev);
4319
4320 pci_set_master(pdev);
4321
4322 if (netif_running(ndev)) {
4323 if (ql_adapter_up(qdev)) {
4324 QPRINTK(qdev, IFUP, ERR,
4325 "Device initialization failed after reset.\n");
4326 return;
4327 }
4328 }
4329
4330 netif_device_attach(ndev);
4331}
4332
4333static struct pci_error_handlers qlge_err_handler = {
4334 .error_detected = qlge_io_error_detected,
4335 .slot_reset = qlge_io_slot_reset,
4336 .resume = qlge_io_resume,
4337};
4338
4339static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4340{
4341 struct net_device *ndev = pci_get_drvdata(pdev);
4342 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004343 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004344
4345 netif_device_detach(ndev);
4346
4347 if (netif_running(ndev)) {
4348 err = ql_adapter_down(qdev);
4349 if (!err)
4350 return err;
4351 }
4352
Ron Mercerbc083ce2009-10-21 11:07:40 +00004353 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004354 err = pci_save_state(pdev);
4355 if (err)
4356 return err;
4357
4358 pci_disable_device(pdev);
4359
4360 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4361
4362 return 0;
4363}
4364
David S. Miller04da2cf2008-09-19 16:14:24 -07004365#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004366static int qlge_resume(struct pci_dev *pdev)
4367{
4368 struct net_device *ndev = pci_get_drvdata(pdev);
4369 struct ql_adapter *qdev = netdev_priv(ndev);
4370 int err;
4371
4372 pci_set_power_state(pdev, PCI_D0);
4373 pci_restore_state(pdev);
4374 err = pci_enable_device(pdev);
4375 if (err) {
4376 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4377 return err;
4378 }
4379 pci_set_master(pdev);
4380
4381 pci_enable_wake(pdev, PCI_D3hot, 0);
4382 pci_enable_wake(pdev, PCI_D3cold, 0);
4383
4384 if (netif_running(ndev)) {
4385 err = ql_adapter_up(qdev);
4386 if (err)
4387 return err;
4388 }
4389
4390 netif_device_attach(ndev);
4391
4392 return 0;
4393}
David S. Miller04da2cf2008-09-19 16:14:24 -07004394#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004395
4396static void qlge_shutdown(struct pci_dev *pdev)
4397{
4398 qlge_suspend(pdev, PMSG_SUSPEND);
4399}
4400
4401static struct pci_driver qlge_driver = {
4402 .name = DRV_NAME,
4403 .id_table = qlge_pci_tbl,
4404 .probe = qlge_probe,
4405 .remove = __devexit_p(qlge_remove),
4406#ifdef CONFIG_PM
4407 .suspend = qlge_suspend,
4408 .resume = qlge_resume,
4409#endif
4410 .shutdown = qlge_shutdown,
4411 .err_handler = &qlge_err_handler
4412};
4413
4414static int __init qlge_init_module(void)
4415{
4416 return pci_register_driver(&qlge_driver);
4417}
4418
4419static void __exit qlge_exit(void)
4420{
4421 pci_unregister_driver(&qlge_driver);
4422}
4423
4424module_init(qlge_init_module);
4425module_exit(qlge_exit);