/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER | */
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCoE firmware and the FC driver.
 */
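/* Returns 0 when the semaphore was acquired (our SEM_SET bits read
 * back latched) and non-zero when another function already owns it.
 */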
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

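/* Spin for up to ~3 ms (30 tries x 100 us) waiting for the semaphore. */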
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly during initialization,
 * but is also called from kernel-thread contexts such as
 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
 */
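/* Example (sketch), mirroring the flash-read path later in this file:
 * wait for the flash address register, treating FLASH_ADDR_ERR as fatal:
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
 *				 FLASH_ADDR_ERR);
 */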
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
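/* Typical call (sketch): download a completion-queue init control block,
 * where cqicb is assumed to point at the queue's init control block:
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */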
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the mapping made above. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
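/* Each 32-bit word is fetched with the same handshake: wait for the
 * index register to go ready (MAC_ADDR_MW), write the index with the
 * address/read-select bits (MAC_ADDR_ADR | MAC_ADDR_RS) set, wait for
 * data ready (MAC_ADDR_MR), then read MAC_ADDR_DATA.  CAM entries are
 * two address words plus an output-control word.
 */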
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
					   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (0 << CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set MAC addr %02x:%02x:%02x:%02x:%02x:%02x\n",
			addr[0], addr[1], addr[2], addr[3],
			addr[4], addr[5]);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init MAC "
			"address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
		qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
		qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
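/* Example (sketch): route broadcast frames to the default queue, as the
 * routing-table initialization elsewhere in this driver does:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 */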
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL;	/* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

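/* A valid flash image starts with a 4-byte signature, and its 16-bit
 * words sum (mod 0x10000) to zero; any non-zero sum is returned as the
 * error value.
 */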
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
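/* The two 32-bit halves are read with separate handshakes, so a counter
 * that ticks between the reads can yield a momentarily inconsistent
 * value; that appears acceptable for the statistics this is used for.
 */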
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Process (refill) a large buffer queue. */
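/* Buffers are replenished in batches of 16; the producer index is
 * written to the doorbell register only once per call, after all
 * complete batches have been refilled.
 */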
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					rx_ring->lbq_clean_idx = clean_idx;
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					rx_ring->lbq_clean_idx = clean_idx;
					put_page(lbq_desc->p.lbq_page);
					lbq_desc->p.lbq_page = NULL;
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
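/* On the NETDEV_TX_BUSY path every mapping made so far has already
 * been undone via ql_unmap_send(), so the caller may simply requeue
 * or drop the skb.
 */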
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
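/* Four layouts are handled, keyed off the header-split (HS) and
 * data-in-small/large (DS/DL) flags: header and data both in small
 * buffers; header in small with data in one large buffer; header and
 * data together in one large buffer; and data scattered over a chain
 * of large buffers described by an sg list held in a small buffer.
 */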
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer.  We
			 * copy it to a new skb and let it go.  This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		__le64 *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (__le64 *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK);

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
			ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"UDP checksum done!\n");
			}
		}
	}

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
	ndev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

1709/* Fire up a handler to reset the MPI processor. */
1710void ql_queue_fw_error(struct ql_adapter *qdev)
1711{
Ron Mercer6a473302009-07-02 06:06:12 +00001712 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001713 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1714}
1715
1716void ql_queue_asic_error(struct ql_adapter *qdev)
1717{
Ron Mercer6a473302009-07-02 06:06:12 +00001718 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001719 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08001720 /* Clear adapter up bit to signal the recovery
1721 * process that it shouldn't kill the reset worker
1722 * thread
1723 */
1724 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001725 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1726}
1727
1728static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1729 struct ib_ae_iocb_rsp *ib_ae_rsp)
1730{
1731 switch (ib_ae_rsp->event) {
1732 case MGMT_ERR_EVENT:
1733 QPRINTK(qdev, RX_ERR, ERR,
1734 "Management Processor Fatal Error.\n");
1735 ql_queue_fw_error(qdev);
1736 return;
1737
1738 case CAM_LOOKUP_ERR_EVENT:
1739 QPRINTK(qdev, LINK, ERR,
1740 "Multiple CAM hits lookup occurred.\n");
1741 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1742 ql_queue_asic_error(qdev);
1743 return;
1744
1745 case SOFT_ECC_ERROR_EVENT:
1746 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1747 ql_queue_asic_error(qdev);
1748 break;
1749
1750 case PCI_ERR_ANON_BUF_RD:
1751 QPRINTK(qdev, RX_ERR, ERR,
1752 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1753 ib_ae_rsp->q_id);
1754 ql_queue_asic_error(qdev);
1755 break;
1756
1757 default:
1758 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1759 ib_ae_rsp->event);
1760 ql_queue_asic_error(qdev);
1761 break;
1762 }
1763}
1764
1765static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1766{
1767 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001768 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001769 struct ob_mac_iocb_rsp *net_rsp = NULL;
1770 int count = 0;
1771
Ron Mercer1e213302009-03-09 10:59:21 +00001772 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001773 /* While there are entries in the completion queue. */
1774 while (prod != rx_ring->cnsmr_idx) {
1775
1776 QPRINTK(qdev, RX_STATUS, DEBUG,
1777 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1778 prod, rx_ring->cnsmr_idx);
1779
1780 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1781 rmb();
1782 switch (net_rsp->opcode) {
1783
1784 case OPCODE_OB_MAC_TSO_IOCB:
1785 case OPCODE_OB_MAC_IOCB:
1786 ql_process_mac_tx_intr(qdev, net_rsp);
1787 break;
1788 default:
1789 QPRINTK(qdev, RX_STATUS, DEBUG,
1790 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1791 net_rsp->opcode);
1792 }
1793 count++;
1794 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001795 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001796 }
1797 ql_write_cq_idx(rx_ring);
	/* net_rsp is NULL if the completion queue was empty, so check it
	 * before using it to find the tx ring to wake.
	 */
	if (net_rsp != NULL) {
		tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
		    atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
1809
1810 return count;
1811}
1812
1813static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1814{
1815 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001816 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001817 struct ql_net_rsp_iocb *net_rsp;
1818 int count = 0;
1819
1820 /* While there are entries in the completion queue. */
1821 while (prod != rx_ring->cnsmr_idx) {
1822
1823 QPRINTK(qdev, RX_STATUS, DEBUG,
1824 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1825 prod, rx_ring->cnsmr_idx);
1826
1827 net_rsp = rx_ring->curr_entry;
1828 rmb();
1829 switch (net_rsp->opcode) {
1830 case OPCODE_IB_MAC_IOCB:
1831 ql_process_mac_rx_intr(qdev, rx_ring,
1832 (struct ib_mac_iocb_rsp *)
1833 net_rsp);
1834 break;
1835
1836 case OPCODE_IB_AE_IOCB:
1837 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1838 net_rsp);
1839 break;
1840 default:
1841 {
1842 QPRINTK(qdev, RX_STATUS, DEBUG,
1843 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1844 net_rsp->opcode);
1845 }
1846 }
1847 count++;
1848 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001849 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001850 if (count == budget)
1851 break;
1852 }
1853 ql_update_buffer_queues(qdev, rx_ring);
1854 ql_write_cq_idx(rx_ring);
1855 return count;
1856}
1857
1858static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1859{
1860 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1861 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00001862 struct rx_ring *trx_ring;
1863 int i, work_done = 0;
1864 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001865
1866 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1867 rx_ring->cq_id);
1868
Ron Mercer39aa8162009-08-27 11:02:11 +00001869 /* Service the TX rings first. They start
1870 * right after the RSS rings. */
1871 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1872 trx_ring = &qdev->rx_ring[i];
1873 /* If this TX completion ring belongs to this vector and
1874 * it's not empty then service it.
1875 */
1876 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1877 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1878 trx_ring->cnsmr_idx)) {
1879 QPRINTK(qdev, INTR, DEBUG,
1880 "%s: Servicing TX completion ring %d.\n",
1881 __func__, trx_ring->cq_id);
1882 ql_clean_outbound_rx_ring(trx_ring);
1883 }
1884 }
1885
1886 /*
1887 * Now service the RSS ring if it's active.
1888 */
1889 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1890 rx_ring->cnsmr_idx) {
1891 QPRINTK(qdev, INTR, DEBUG,
1892 "%s: Servicing RX completion ring %d.\n",
1893 __func__, rx_ring->cq_id);
1894 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1895 }
1896
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001897 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001898 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001899 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1900 }
1901 return work_done;
1902}
1903
1904static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1905{
1906 struct ql_adapter *qdev = netdev_priv(ndev);
1907
1908 qdev->vlgrp = grp;
1909 if (grp) {
1910 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1911 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1912 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1913 } else {
1914 QPRINTK(qdev, IFUP, DEBUG,
1915 "Turning off VLAN in NIC_RCV_CFG.\n");
1916 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1917 }
1918}
1919
1920static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1921{
1922 struct ql_adapter *qdev = netdev_priv(ndev);
1923 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00001924 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001925
Ron Mercercc288f52009-02-23 10:42:14 +00001926 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1927 if (status)
1928 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929 if (ql_set_mac_addr_reg
1930 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1931 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1932 }
Ron Mercercc288f52009-02-23 10:42:14 +00001933 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001934}
1935
1936static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1937{
1938 struct ql_adapter *qdev = netdev_priv(ndev);
1939 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00001940 int status;
1941
1942 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1943 if (status)
1944 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001945
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001946 if (ql_set_mac_addr_reg
1947 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1948 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1949 }
Ron Mercercc288f52009-02-23 10:42:14 +00001950 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001951
1952}
1953
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001954/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1955static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1956{
1957 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08001958 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001959 return IRQ_HANDLED;
1960}
1961
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001962/* This handles a fatal error, MPI activity, and the default
1963 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/legacy environment it also processes the rest of
 * the rx_rings.
1966 */
1967static irqreturn_t qlge_isr(int irq, void *dev_id)
1968{
1969 struct rx_ring *rx_ring = dev_id;
1970 struct ql_adapter *qdev = rx_ring->qdev;
1971 struct intr_context *intr_context = &qdev->intr_context[0];
1972 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001973 int work_done = 0;
1974
Ron Mercerbb0d2152008-10-20 10:30:26 -07001975 spin_lock(&qdev->hw_lock);
1976 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1977 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1978 spin_unlock(&qdev->hw_lock);
1979 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001980 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07001981 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001982
Ron Mercerbb0d2152008-10-20 10:30:26 -07001983 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001984
1985 /*
1986 * Check for fatal error.
1987 */
1988 if (var & STS_FE) {
1989 ql_queue_asic_error(qdev);
1990 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1991 var = ql_read32(qdev, ERR_STS);
1992 QPRINTK(qdev, INTR, ERR,
1993 "Resetting chip. Error Status Register = 0x%x\n", var);
1994 return IRQ_HANDLED;
1995 }
1996
1997 /*
1998 * Check MPI processor activity.
1999 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002000 if ((var & STS_PI) &&
2001 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002002 /*
2003 * We've got an async event or mailbox completion.
2004 * Handle it and clear the source of the interrupt.
2005 */
2006 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2007 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002008 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2009 queue_delayed_work_on(smp_processor_id(),
2010 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002011 work_done++;
2012 }
2013
2014 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002015 * Get the bit-mask that shows the active queues for this
2016 * pass. Compare it to the queues that this irq services
2017 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002018 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002019 var = ql_read32(qdev, ISR1);
2020 if (var & intr_context->irq_mask) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002021 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002022 "Waking handler for rx_ring[0].\n");
2023 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ben Hutchings288379f2009-01-19 16:43:59 -08002024 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002025 work_done++;
2026 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002027 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002028 return work_done ? IRQ_HANDLED : IRQ_NONE;
2029}
2030
2031static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2032{
2034 if (skb_is_gso(skb)) {
2035 int err;
2036 if (skb_header_cloned(skb)) {
2037 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2038 if (err)
2039 return err;
2040 }
2041
2042 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2043 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2044 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2045 mac_iocb_ptr->total_hdrs_len =
2046 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2047 mac_iocb_ptr->net_trans_offset =
2048 cpu_to_le16(skb_network_offset(skb) |
2049 skb_transport_offset(skb)
2050 << OB_MAC_TRANSPORT_HDR_SHIFT);
2051 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2052 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
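		/* Seed the TCP checksum with the pseudo-header sum (length
		 * omitted); the chip completes the checksum for each segment
		 * it generates.
		 */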
2053 if (likely(skb->protocol == htons(ETH_P_IP))) {
2054 struct iphdr *iph = ip_hdr(skb);
2055 iph->check = 0;
2056 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2057 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2058 iph->daddr, 0,
2059 IPPROTO_TCP,
2060 0);
2061 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2062 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2063 tcp_hdr(skb)->check =
2064 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2065 &ipv6_hdr(skb)->daddr,
2066 0, IPPROTO_TCP, 0);
2067 }
2068 return 1;
2069 }
2070 return 0;
2071}
2072
2073static void ql_hw_csum_setup(struct sk_buff *skb,
2074 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2075{
2076 int len;
2077 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002078 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002079 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2080 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2081 mac_iocb_ptr->net_trans_offset =
2082 cpu_to_le16(skb_network_offset(skb) |
2083 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2084
2085 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2086 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2087 if (likely(iph->protocol == IPPROTO_TCP)) {
2088 check = &(tcp_hdr(skb)->check);
2089 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2090 mac_iocb_ptr->total_hdrs_len =
2091 cpu_to_le16(skb_transport_offset(skb) +
2092 (tcp_hdr(skb)->doff << 2));
2093 } else {
2094 check = &(udp_hdr(skb)->check);
2095 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2096 mac_iocb_ptr->total_hdrs_len =
2097 cpu_to_le16(skb_transport_offset(skb) +
2098 sizeof(struct udphdr));
2099 }
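	/* Store the pseudo-header sum in the transport checksum field;
	 * the hardware folds in the payload to finish it.
	 */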
2100 *check = ~csum_tcpudp_magic(iph->saddr,
2101 iph->daddr, len, iph->protocol, 0);
2102}
2103
Stephen Hemminger613573252009-08-31 19:50:58 +00002104static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002105{
2106 struct tx_ring_desc *tx_ring_desc;
2107 struct ob_mac_iocb_req *mac_iocb_ptr;
2108 struct ql_adapter *qdev = netdev_priv(ndev);
2109 int tso;
2110 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002111 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002112
2113 tx_ring = &qdev->tx_ring[tx_ring_idx];
2114
Ron Mercer74c50b42009-03-09 10:59:27 +00002115 if (skb_padto(skb, ETH_ZLEN))
2116 return NETDEV_TX_OK;
2117
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002118 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2119 QPRINTK(qdev, TX_QUEUED, INFO,
2120 "%s: shutting down tx queue %d du to lack of resources.\n",
2121 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002122 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 atomic_inc(&tx_ring->queue_stopped);
2124 return NETDEV_TX_BUSY;
2125 }
2126 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2127 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002128 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002129
2130 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2131 mac_iocb_ptr->tid = tx_ring_desc->index;
2132 /* We use the upper 32-bits to store the tx queue for this IO.
2133 * When we get the completion we can use it to establish the context.
2134 */
2135 mac_iocb_ptr->txq_idx = tx_ring_idx;
2136 tx_ring_desc->skb = skb;
2137
2138 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2139
2140 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2141 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2142 vlan_tx_tag_get(skb));
2143 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2144 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2145 }
2146 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2147 if (tso < 0) {
2148 dev_kfree_skb_any(skb);
2149 return NETDEV_TX_OK;
2150 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2151 ql_hw_csum_setup(skb,
2152 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2153 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002154 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2155 NETDEV_TX_OK) {
2156 QPRINTK(qdev, TX_QUEUED, ERR,
2157 "Could not map the segments.\n");
2158 return NETDEV_TX_BUSY;
2159 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002160 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2161 tx_ring->prod_idx++;
2162 if (tx_ring->prod_idx == tx_ring->wq_len)
2163 tx_ring->prod_idx = 0;
2164 wmb();
2165
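	/* The wmb() above orders the IOCB writes ahead of this doorbell
	 * write, so the chip never fetches a half-built request.
	 */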
2166 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002167 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2168 tx_ring->prod_idx, skb->len);
2169
2170 atomic_dec(&tx_ring->tx_count);
2171 return NETDEV_TX_OK;
2172}
2173
2174static void ql_free_shadow_space(struct ql_adapter *qdev)
2175{
2176 if (qdev->rx_ring_shadow_reg_area) {
2177 pci_free_consistent(qdev->pdev,
2178 PAGE_SIZE,
2179 qdev->rx_ring_shadow_reg_area,
2180 qdev->rx_ring_shadow_reg_dma);
2181 qdev->rx_ring_shadow_reg_area = NULL;
2182 }
2183 if (qdev->tx_ring_shadow_reg_area) {
2184 pci_free_consistent(qdev->pdev,
2185 PAGE_SIZE,
2186 qdev->tx_ring_shadow_reg_area,
2187 qdev->tx_ring_shadow_reg_dma);
2188 qdev->tx_ring_shadow_reg_area = NULL;
2189 }
2190}
2191
2192static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2193{
2194 qdev->rx_ring_shadow_reg_area =
2195 pci_alloc_consistent(qdev->pdev,
2196 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2197 if (qdev->rx_ring_shadow_reg_area == NULL) {
2198 QPRINTK(qdev, IFUP, ERR,
2199 "Allocation of RX shadow space failed.\n");
2200 return -ENOMEM;
2201 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002202 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002203 qdev->tx_ring_shadow_reg_area =
2204 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2205 &qdev->tx_ring_shadow_reg_dma);
2206 if (qdev->tx_ring_shadow_reg_area == NULL) {
2207 QPRINTK(qdev, IFUP, ERR,
2208 "Allocation of TX shadow space failed.\n");
2209 goto err_wqp_sh_area;
2210 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002211 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002212 return 0;
2213
2214err_wqp_sh_area:
2215 pci_free_consistent(qdev->pdev,
2216 PAGE_SIZE,
2217 qdev->rx_ring_shadow_reg_area,
2218 qdev->rx_ring_shadow_reg_dma);
2219 return -ENOMEM;
2220}
2221
2222static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2223{
2224 struct tx_ring_desc *tx_ring_desc;
2225 int i;
2226 struct ob_mac_iocb_req *mac_iocb_ptr;
2227
2228 mac_iocb_ptr = tx_ring->wq_base;
2229 tx_ring_desc = tx_ring->q;
2230 for (i = 0; i < tx_ring->wq_len; i++) {
2231 tx_ring_desc->index = i;
2232 tx_ring_desc->skb = NULL;
2233 tx_ring_desc->queue_entry = mac_iocb_ptr;
2234 mac_iocb_ptr++;
2235 tx_ring_desc++;
2236 }
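	/* tx_count tracks free descriptors: it starts at the full ring
	 * size, drops by one per queued send and rises by one per
	 * completion.
	 */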
2237 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2238 atomic_set(&tx_ring->queue_stopped, 0);
2239}
2240
2241static void ql_free_tx_resources(struct ql_adapter *qdev,
2242 struct tx_ring *tx_ring)
2243{
2244 if (tx_ring->wq_base) {
2245 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2246 tx_ring->wq_base, tx_ring->wq_base_dma);
2247 tx_ring->wq_base = NULL;
2248 }
2249 kfree(tx_ring->q);
2250 tx_ring->q = NULL;
2251}
2252
2253static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2254 struct tx_ring *tx_ring)
2255{
2256 tx_ring->wq_base =
2257 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2258 &tx_ring->wq_base_dma);
2259
2260 if ((tx_ring->wq_base == NULL)
Ron Mercer88c55e32009-06-10 15:49:33 +00002261 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002262 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2263 return -ENOMEM;
2264 }
2265 tx_ring->q =
2266 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2267 if (tx_ring->q == NULL)
2268 goto err;
2269
2270 return 0;
2271err:
2272 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2273 tx_ring->wq_base, tx_ring->wq_base_dma);
2274 return -ENOMEM;
2275}
2276
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002277static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002278{
2279 int i;
2280 struct bq_desc *lbq_desc;
2281
2282 for (i = 0; i < rx_ring->lbq_len; i++) {
2283 lbq_desc = &rx_ring->lbq[i];
2284 if (lbq_desc->p.lbq_page) {
2285 pci_unmap_page(qdev->pdev,
2286 pci_unmap_addr(lbq_desc, mapaddr),
2287 pci_unmap_len(lbq_desc, maplen),
2288 PCI_DMA_FROMDEVICE);
2289
2290 put_page(lbq_desc->p.lbq_page);
2291 lbq_desc->p.lbq_page = NULL;
2292 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002293 }
2294}
2295
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002296static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002297{
2298 int i;
2299 struct bq_desc *sbq_desc;
2300
2301 for (i = 0; i < rx_ring->sbq_len; i++) {
2302 sbq_desc = &rx_ring->sbq[i];
2303 if (sbq_desc == NULL) {
2304 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2305 return;
2306 }
2307 if (sbq_desc->p.skb) {
2308 pci_unmap_single(qdev->pdev,
2309 pci_unmap_addr(sbq_desc, mapaddr),
2310 pci_unmap_len(sbq_desc, maplen),
2311 PCI_DMA_FROMDEVICE);
2312 dev_kfree_skb(sbq_desc->p.skb);
2313 sbq_desc->p.skb = NULL;
2314 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002315 }
2316}
2317
Ron Mercer4545a3f2009-02-23 10:42:17 +00002318/* Free all large and small rx buffers associated
2319 * with the completion queues for this device.
2320 */
2321static void ql_free_rx_buffers(struct ql_adapter *qdev)
2322{
2323 int i;
2324 struct rx_ring *rx_ring;
2325
2326 for (i = 0; i < qdev->rx_ring_count; i++) {
2327 rx_ring = &qdev->rx_ring[i];
2328 if (rx_ring->lbq)
2329 ql_free_lbq_buffers(qdev, rx_ring);
2330 if (rx_ring->sbq)
2331 ql_free_sbq_buffers(qdev, rx_ring);
2332 }
2333}
2334
2335static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2336{
2337 struct rx_ring *rx_ring;
2338 int i;
2339
2340 for (i = 0; i < qdev->rx_ring_count; i++) {
2341 rx_ring = &qdev->rx_ring[i];
2342 if (rx_ring->type != TX_Q)
2343 ql_update_buffer_queues(qdev, rx_ring);
2344 }
2345}
2346
2347static void ql_init_lbq_ring(struct ql_adapter *qdev,
2348 struct rx_ring *rx_ring)
2349{
2350 int i;
2351 struct bq_desc *lbq_desc;
2352 __le64 *bq = rx_ring->lbq_base;
2353
2354 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2355 for (i = 0; i < rx_ring->lbq_len; i++) {
2356 lbq_desc = &rx_ring->lbq[i];
2357 memset(lbq_desc, 0, sizeof(*lbq_desc));
2358 lbq_desc->index = i;
2359 lbq_desc->addr = bq;
2360 bq++;
2361 }
2362}
2363
2364static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002365 struct rx_ring *rx_ring)
2366{
2367 int i;
2368 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002369 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002370
Ron Mercer4545a3f2009-02-23 10:42:17 +00002371 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002372 for (i = 0; i < rx_ring->sbq_len; i++) {
2373 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002374 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002375 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002376 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002377 bq++;
2378 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002379}
2380
2381static void ql_free_rx_resources(struct ql_adapter *qdev,
2382 struct rx_ring *rx_ring)
2383{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384 /* Free the small buffer queue. */
2385 if (rx_ring->sbq_base) {
2386 pci_free_consistent(qdev->pdev,
2387 rx_ring->sbq_size,
2388 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2389 rx_ring->sbq_base = NULL;
2390 }
2391
2392 /* Free the small buffer queue control blocks. */
2393 kfree(rx_ring->sbq);
2394 rx_ring->sbq = NULL;
2395
2396 /* Free the large buffer queue. */
2397 if (rx_ring->lbq_base) {
2398 pci_free_consistent(qdev->pdev,
2399 rx_ring->lbq_size,
2400 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2401 rx_ring->lbq_base = NULL;
2402 }
2403
2404 /* Free the large buffer queue control blocks. */
2405 kfree(rx_ring->lbq);
2406 rx_ring->lbq = NULL;
2407
2408 /* Free the rx queue. */
2409 if (rx_ring->cq_base) {
2410 pci_free_consistent(qdev->pdev,
2411 rx_ring->cq_size,
2412 rx_ring->cq_base, rx_ring->cq_base_dma);
2413 rx_ring->cq_base = NULL;
2414 }
2415}
2416
/* Allocate queues and buffers for this completion queue based
2418 * on the values in the parameter structure. */
2419static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2420 struct rx_ring *rx_ring)
2421{
2423 /*
2424 * Allocate the completion queue for this rx_ring.
2425 */
2426 rx_ring->cq_base =
2427 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2428 &rx_ring->cq_base_dma);
2429
2430 if (rx_ring->cq_base == NULL) {
2431 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2432 return -ENOMEM;
2433 }
2434
2435 if (rx_ring->sbq_len) {
2436 /*
2437 * Allocate small buffer queue.
2438 */
2439 rx_ring->sbq_base =
2440 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2441 &rx_ring->sbq_base_dma);
2442
2443 if (rx_ring->sbq_base == NULL) {
2444 QPRINTK(qdev, IFUP, ERR,
2445 "Small buffer queue allocation failed.\n");
2446 goto err_mem;
2447 }
2448
2449 /*
2450 * Allocate small buffer queue control blocks.
2451 */
2452 rx_ring->sbq =
2453 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2454 GFP_KERNEL);
2455 if (rx_ring->sbq == NULL) {
2456 QPRINTK(qdev, IFUP, ERR,
2457 "Small buffer queue control block allocation failed.\n");
2458 goto err_mem;
2459 }
2460
Ron Mercer4545a3f2009-02-23 10:42:17 +00002461 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002462 }
2463
2464 if (rx_ring->lbq_len) {
2465 /*
2466 * Allocate large buffer queue.
2467 */
2468 rx_ring->lbq_base =
2469 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2470 &rx_ring->lbq_base_dma);
2471
2472 if (rx_ring->lbq_base == NULL) {
2473 QPRINTK(qdev, IFUP, ERR,
2474 "Large buffer queue allocation failed.\n");
2475 goto err_mem;
2476 }
2477 /*
2478 * Allocate large buffer queue control blocks.
2479 */
2480 rx_ring->lbq =
2481 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2482 GFP_KERNEL);
2483 if (rx_ring->lbq == NULL) {
2484 QPRINTK(qdev, IFUP, ERR,
2485 "Large buffer queue control block allocation failed.\n");
2486 goto err_mem;
2487 }
2488
Ron Mercer4545a3f2009-02-23 10:42:17 +00002489 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002490 }
2491
2492 return 0;
2493
2494err_mem:
2495 ql_free_rx_resources(qdev, rx_ring);
2496 return -ENOMEM;
2497}
2498
2499static void ql_tx_ring_clean(struct ql_adapter *qdev)
2500{
2501 struct tx_ring *tx_ring;
2502 struct tx_ring_desc *tx_ring_desc;
2503 int i, j;
2504
2505 /*
2506 * Loop through all queues and free
2507 * any resources.
2508 */
2509 for (j = 0; j < qdev->tx_ring_count; j++) {
2510 tx_ring = &qdev->tx_ring[j];
2511 for (i = 0; i < tx_ring->wq_len; i++) {
2512 tx_ring_desc = &tx_ring->q[i];
2513 if (tx_ring_desc && tx_ring_desc->skb) {
2514 QPRINTK(qdev, IFDOWN, ERR,
2515 "Freeing lost SKB %p, from queue %d, index %d.\n",
2516 tx_ring_desc->skb, j,
2517 tx_ring_desc->index);
2518 ql_unmap_send(qdev, tx_ring_desc,
2519 tx_ring_desc->map_cnt);
2520 dev_kfree_skb(tx_ring_desc->skb);
2521 tx_ring_desc->skb = NULL;
2522 }
2523 }
2524 }
2525}
2526
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002527static void ql_free_mem_resources(struct ql_adapter *qdev)
2528{
2529 int i;
2530
2531 for (i = 0; i < qdev->tx_ring_count; i++)
2532 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2533 for (i = 0; i < qdev->rx_ring_count; i++)
2534 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2535 ql_free_shadow_space(qdev);
2536}
2537
2538static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2539{
2540 int i;
2541
2542 /* Allocate space for our shadow registers and such. */
2543 if (ql_alloc_shadow_space(qdev))
2544 return -ENOMEM;
2545
2546 for (i = 0; i < qdev->rx_ring_count; i++) {
2547 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2548 QPRINTK(qdev, IFUP, ERR,
2549 "RX resource allocation failed.\n");
2550 goto err_mem;
2551 }
2552 }
2553 /* Allocate tx queue resources */
2554 for (i = 0; i < qdev->tx_ring_count; i++) {
2555 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2556 QPRINTK(qdev, IFUP, ERR,
2557 "TX resource allocation failed.\n");
2558 goto err_mem;
2559 }
2560 }
2561 return 0;
2562
2563err_mem:
2564 ql_free_mem_resources(qdev);
2565 return -ENOMEM;
2566}
2567
2568/* Set up the rx ring control block and pass it to the chip.
2569 * The control block is defined as
2570 * "Completion Queue Initialization Control Block", or cqicb.
2571 */
2572static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2573{
2574 struct cqicb *cqicb = &rx_ring->cqicb;
2575 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002576 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002577 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002578 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002579 void __iomem *doorbell_area =
2580 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2581 int err = 0;
2582 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002583 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002584 __le64 *base_indirect_ptr;
2585 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002586
2587 /* Set up the shadow registers for this ring. */
2588 rx_ring->prod_idx_sh_reg = shadow_reg;
2589 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2590 shadow_reg += sizeof(u64);
2591 shadow_reg_dma += sizeof(u64);
2592 rx_ring->lbq_base_indirect = shadow_reg;
2593 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002594 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2595 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002596 rx_ring->sbq_base_indirect = shadow_reg;
2597 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
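	/* Each ring's slice of the shadow page thus holds, in order, an
	 * 8-byte producer-index shadow, the lbq indirection list and the
	 * sbq indirection list.
	 */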
2598
2599 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002600 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002601 rx_ring->cnsmr_idx = 0;
2602 rx_ring->curr_entry = rx_ring->cq_base;
2603
2604 /* PCI doorbell mem area + 0x04 for valid register */
2605 rx_ring->valid_db_reg = doorbell_area + 0x04;
2606
2607 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002608 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002609
2610 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002611 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002612
2613 memset((void *)cqicb, 0, sizeof(struct cqicb));
2614 cqicb->msix_vect = rx_ring->irq;
2615
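	/* Queue lengths occupy 16-bit fields, so the maximum length of
	 * 65536 is encoded as 0 here and below.
	 */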
Ron Mercer459caf52009-01-04 17:08:11 -08002616 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2617 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002618
Ron Mercer97345522009-01-09 11:31:50 +00002619 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002620
Ron Mercer97345522009-01-09 11:31:50 +00002621 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002622
2623 /*
2624 * Set up the control block load flags.
2625 */
2626 cqicb->flags = FLAGS_LC | /* Load queue base address */
2627 FLAGS_LV | /* Load MSI-X vector */
2628 FLAGS_LI; /* Load irq delay values */
2629 if (rx_ring->lbq_len) {
2630 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002631 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002632 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2633 page_entries = 0;
2634 do {
2635 *base_indirect_ptr = cpu_to_le64(tmp);
2636 tmp += DB_PAGE_SIZE;
2637 base_indirect_ptr++;
2638 page_entries++;
2639 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002640 cqicb->lbq_addr =
2641 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002642 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2643 (u16) rx_ring->lbq_buf_size;
2644 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2645 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2646 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002647 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002648 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002649 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002650 rx_ring->lbq_clean_idx = 0;
2651 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002652 }
2653 if (rx_ring->sbq_len) {
2654 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002655 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002656 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2657 page_entries = 0;
2658 do {
2659 *base_indirect_ptr = cpu_to_le64(tmp);
2660 tmp += DB_PAGE_SIZE;
2661 base_indirect_ptr++;
2662 page_entries++;
2663 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002664 cqicb->sbq_addr =
2665 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002666 cqicb->sbq_buf_size =
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002667 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
Ron Mercer459caf52009-01-04 17:08:11 -08002668 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2669 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002670 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002671 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002672 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002673 rx_ring->sbq_clean_idx = 0;
2674 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002675 }
2676 switch (rx_ring->type) {
2677 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002678 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2679 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2680 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002681 case RX_Q:
2682 /* Inbound completion handling rx_rings run in
2683 * separate NAPI contexts.
2684 */
2685 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2686 64);
2687 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2688 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2689 break;
2690 default:
2691 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2692 rx_ring->type);
2693 }
	QPRINTK(qdev, IFUP, DEBUG, "Initializing rx completion queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002695 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2696 CFG_LCQ, rx_ring->cq_id);
2697 if (err) {
2698 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2699 return err;
2700 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002701 return err;
2702}
2703
2704static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2705{
2706 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2707 void __iomem *doorbell_area =
2708 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2709 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2710 (tx_ring->wq_id * sizeof(u64));
2711 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2712 (tx_ring->wq_id * sizeof(u64));
2713 int err = 0;
2714
2715 /*
2716 * Assign doorbell registers for this tx_ring.
2717 */
2718 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002719 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002720 tx_ring->prod_idx = 0;
2721 /* TX PCI doorbell mem area + 0x04 */
2722 tx_ring->valid_db_reg = doorbell_area + 0x04;
2723
2724 /*
2725 * Assign shadow registers for this tx_ring.
2726 */
2727 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2728 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2729
2730 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2731 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2732 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2733 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2734 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002735 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002736
Ron Mercer97345522009-01-09 11:31:50 +00002737 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002738
2739 ql_init_tx_ring(qdev, tx_ring);
2740
Ron Mercere3324712009-07-02 06:06:13 +00002741 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002742 (u16) tx_ring->wq_id);
2743 if (err) {
2744 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2745 return err;
2746 }
Ron Mercer49740972009-02-26 10:08:36 +00002747 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002748 return err;
2749}
2750
2751static void ql_disable_msix(struct ql_adapter *qdev)
2752{
2753 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2754 pci_disable_msix(qdev->pdev);
2755 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2756 kfree(qdev->msi_x_entry);
2757 qdev->msi_x_entry = NULL;
2758 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2759 pci_disable_msi(qdev->pdev);
2760 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2761 }
2762}
2763
Ron Mercera4ab6132009-08-27 11:02:10 +00002764/* We start by trying to get the number of vectors
2765 * stored in qdev->intr_count. If we don't get that
2766 * many then we reduce the count and try again.
2767 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002768static void ql_enable_msix(struct ql_adapter *qdev)
2769{
Ron Mercera4ab6132009-08-27 11:02:10 +00002770 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002771
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002772 /* Get the MSIX vectors. */
2773 if (irq_type == MSIX_IRQ) {
2774 /* Try to alloc space for the msix struct,
2775 * if it fails then go to MSI/legacy.
2776 */
Ron Mercera4ab6132009-08-27 11:02:10 +00002777 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002778 sizeof(struct msix_entry),
2779 GFP_KERNEL);
2780 if (!qdev->msi_x_entry) {
2781 irq_type = MSI_IRQ;
2782 goto msi;
2783 }
2784
Ron Mercera4ab6132009-08-27 11:02:10 +00002785 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002786 qdev->msi_x_entry[i].entry = i;
2787
Ron Mercera4ab6132009-08-27 11:02:10 +00002788 /* Loop to get our vectors. We start with
2789 * what we want and settle for what we get.
2790 */
2791 do {
2792 err = pci_enable_msix(qdev->pdev,
2793 qdev->msi_x_entry, qdev->intr_count);
2794 if (err > 0)
2795 qdev->intr_count = err;
2796 } while (err > 0);
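		/* A positive return from pci_enable_msix() reports how many
		 * vectors are actually available, so each pass retries with
		 * that smaller count until the call succeeds (0) or fails
		 * outright (< 0).
		 */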
2797
2798 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002799 kfree(qdev->msi_x_entry);
2800 qdev->msi_x_entry = NULL;
2801 QPRINTK(qdev, IFUP, WARNING,
2802 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00002803 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002804 irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00002805 } else if (err == 0) {
2806 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2807 QPRINTK(qdev, IFUP, INFO,
2808 "MSI-X Enabled, got %d vectors.\n",
2809 qdev->intr_count);
2810 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002811 }
2812 }
2813msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00002814 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002815 if (irq_type == MSI_IRQ) {
2816 if (!pci_enable_msi(qdev->pdev)) {
2817 set_bit(QL_MSI_ENABLED, &qdev->flags);
2818 QPRINTK(qdev, IFUP, INFO,
2819 "Running with MSI interrupts.\n");
2820 return;
2821 }
2822 }
2823 irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002824 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2825}
2826
/* Each vector services 1 RSS ring and 1 or more
2828 * TX completion rings. This function loops through
2829 * the TX completion rings and assigns the vector that
2830 * will service it. An example would be if there are
2831 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2832 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
2834 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2835 */
2836static void ql_set_tx_vect(struct ql_adapter *qdev)
2837{
2838 int i, j, vect;
2839 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2840
2841 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
2843 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2844 i < qdev->rx_ring_count; i++) {
2845 if (j == tx_rings_per_vector) {
2846 vect++;
2847 j = 0;
2848 }
2849 qdev->rx_ring[i].irq = vect;
2850 j++;
2851 }
2852 } else {
2853 /* For single vector all rings have an irq
2854 * of zero.
2855 */
2856 for (i = 0; i < qdev->rx_ring_count; i++)
2857 qdev->rx_ring[i].irq = 0;
2858 }
2859}
2860
2861/* Set the interrupt mask for this vector. Each vector
2862 * will service 1 RSS ring and 1 or more TX completion
2863 * rings. This function sets up a bit mask per vector
2864 * that indicates which rings it services.
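 * For example, with 2 vectors, 2 RSS rings and 8 TX completion
 * rings (assuming cq_id matches the ring index), vector 1 covers
 * RSS ring 1 plus TX rings 6-9, for a mask of 0x3c2.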
2865 */
2866static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2867{
2868 int j, vect = ctx->intr;
2869 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2870
2871 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2872 /* Add the RSS ring serviced by this vector
2873 * to the mask.
2874 */
2875 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2876 /* Add the TX ring(s) serviced by this vector
2877 * to the mask. */
2878 for (j = 0; j < tx_rings_per_vector; j++) {
2879 ctx->irq_mask |=
2880 (1 << qdev->rx_ring[qdev->rss_ring_count +
2881 (vect * tx_rings_per_vector) + j].cq_id);
2882 }
2883 } else {
2884 /* For single vector we just shift each queue's
2885 * ID into the mask.
2886 */
2887 for (j = 0; j < qdev->rx_ring_count; j++)
2888 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2889 }
2890}
2891
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002892/*
2893 * Here we build the intr_context structures based on
2894 * our rx_ring count and intr vector count.
2895 * The intr_context structure is used to hook each vector
2896 * to possibly different handlers.
2897 */
2898static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2899{
2900 int i = 0;
2901 struct intr_context *intr_context = &qdev->intr_context[0];
2902
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002903 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
2905 * own intr_context since we have separate
2906 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002907 */
2908 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2909 qdev->rx_ring[i].irq = i;
2910 intr_context->intr = i;
2911 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002912 /* Set up this vector's bit-mask that indicates
2913 * which queues it services.
2914 */
2915 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002916 /*
			 * We set up each vector's enable/disable/read bits so
			 * there are no bit/mask calculations in the critical path.
2919 */
2920 intr_context->intr_en_mask =
2921 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2922 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2923 | i;
2924 intr_context->intr_dis_mask =
2925 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2926 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2927 INTR_EN_IHD | i;
2928 intr_context->intr_read_mask =
2929 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2930 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2931 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00002932 if (i == 0) {
2933 /* The first vector/queue handles
2934 * broadcast/multicast, fatal errors,
2935 * and firmware events. This in addition
2936 * to normal inbound NAPI processing.
2937 */
2938 intr_context->handler = qlge_isr;
2939 sprintf(intr_context->name, "%s-rx-%d",
2940 qdev->ndev->name, i);
2941 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002942 /*
2943 * Inbound queues handle unicast frames only.
2944 */
2945 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00002946 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002947 qdev->ndev->name, i);
2948 }
2949 }
2950 } else {
2951 /*
2952 * All rx_rings use the same intr_context since
2953 * there is only one vector.
2954 */
2955 intr_context->intr = 0;
2956 intr_context->qdev = qdev;
2957 /*
		 * We set up each vector's enable/disable/read bits so
		 * there are no bit/mask calculations in the critical path.
2960 */
2961 intr_context->intr_en_mask =
2962 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2963 intr_context->intr_dis_mask =
2964 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2965 INTR_EN_TYPE_DISABLE;
2966 intr_context->intr_read_mask =
2967 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2968 /*
2969 * Single interrupt means one handler for all rings.
2970 */
2971 intr_context->handler = qlge_isr;
2972 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00002973 /* Set up this vector's bit-mask that indicates
2974 * which queues it services. In this case there is
2975 * a single vector so it will service all RSS and
2976 * TX completion rings.
2977 */
2978 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002979 }
Ron Mercer39aa8162009-08-27 11:02:11 +00002980 /* Tell the TX completion rings which MSIx vector
2981 * they will be using.
2982 */
2983 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002984}
2985
2986static void ql_free_irq(struct ql_adapter *qdev)
2987{
2988 int i;
2989 struct intr_context *intr_context = &qdev->intr_context[0];
2990
2991 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2992 if (intr_context->hooked) {
2993 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2994 free_irq(qdev->msi_x_entry[i].vector,
2995 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00002996 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002997 "freeing msix interrupt %d.\n", i);
2998 } else {
2999 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00003000 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003001 "freeing msi interrupt %d.\n", i);
3002 }
3003 }
3004 }
3005 ql_disable_msix(qdev);
3006}
3007
3008static int ql_request_irq(struct ql_adapter *qdev)
3009{
3010 int i;
3011 int status = 0;
3012 struct pci_dev *pdev = qdev->pdev;
3013 struct intr_context *intr_context = &qdev->intr_context[0];
3014
3015 ql_resolve_queues_to_irqs(qdev);
3016
3017 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3018 atomic_set(&intr_context->irq_cnt, 0);
3019 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3020 status = request_irq(qdev->msi_x_entry[i].vector,
3021 intr_context->handler,
3022 0,
3023 intr_context->name,
3024 &qdev->rx_ring[i]);
3025 if (status) {
3026 QPRINTK(qdev, IFUP, ERR,
3027 "Failed request for MSIX interrupt %d.\n",
3028 i);
3029 goto err_irq;
3030 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003031 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003032 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3033 i,
3034 qdev->rx_ring[i].type ==
3035 DEFAULT_Q ? "DEFAULT_Q" : "",
3036 qdev->rx_ring[i].type ==
3037 TX_Q ? "TX_Q" : "",
3038 qdev->rx_ring[i].type ==
3039 RX_Q ? "RX_Q" : "", intr_context->name);
3040 }
3041 } else {
3042 QPRINTK(qdev, IFUP, DEBUG,
3043 "trying msi or legacy interrupts.\n");
3044 QPRINTK(qdev, IFUP, DEBUG,
3045 "%s: irq = %d.\n", __func__, pdev->irq);
3046 QPRINTK(qdev, IFUP, DEBUG,
3047 "%s: context->name = %s.\n", __func__,
3048 intr_context->name);
3049 QPRINTK(qdev, IFUP, DEBUG,
3050 "%s: dev_id = 0x%p.\n", __func__,
3051 &qdev->rx_ring[0]);
3052 status =
3053 request_irq(pdev->irq, qlge_isr,
3054 test_bit(QL_MSI_ENABLED,
3055 &qdev->
3056 flags) ? 0 : IRQF_SHARED,
3057 intr_context->name, &qdev->rx_ring[0]);
3058 if (status)
3059 goto err_irq;
3060
			QPRINTK(qdev, IFUP, DEBUG,
3062 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3063 i,
3064 qdev->rx_ring[0].type ==
3065 DEFAULT_Q ? "DEFAULT_Q" : "",
3066 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3067 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3068 intr_context->name);
3069 }
3070 intr_context->hooked = 1;
3071 }
3072 return status;
3073err_irq:
3074 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
3075 ql_free_irq(qdev);
3076 return status;
3077}
3078
3079static int ql_start_rss(struct ql_adapter *qdev)
3080{
3081 struct ricb *ricb = &qdev->ricb;
3082 int status = 0;
3083 int i;
3084 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3085
Ron Mercere3324712009-07-02 06:06:13 +00003086 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003087
Ron Mercerb2014ff2009-08-27 11:02:09 +00003088 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003089 ricb->flags =
3090 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
3091 RSS_RT6);
3092 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
3093
3094 /*
3095 * Fill out the Indirection Table.
3096 */
Ron Mercerdef48b62009-02-12 16:38:18 -08003097 for (i = 0; i < 256; i++)
3098 hash_id[i] = i & (qdev->rss_ring_count - 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003099
3100 /*
3101 * Random values for the IPv6 and IPv4 Hash Keys.
3102 */
3103 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
3104 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
3105
Ron Mercer49740972009-02-26 10:08:36 +00003106 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003107
Ron Mercere3324712009-07-02 06:06:13 +00003108 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003109 if (status) {
3110 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3111 return status;
3112 }
Ron Mercer49740972009-02-26 10:08:36 +00003113 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003114 return status;
3115}
3116
Ron Mercera5f59dc2009-07-02 06:06:07 +00003117static int ql_clear_routing_entries(struct ql_adapter *qdev)
3118{
3119 int i, status = 0;
3120
3121 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3122 if (status)
3123 return status;
3124 /* Clear all the entries in the routing table. */
3125 for (i = 0; i < 16; i++) {
3126 status = ql_set_routing_reg(qdev, i, 0, 0);
3127 if (status) {
3128 QPRINTK(qdev, IFUP, ERR,
3129 "Failed to init routing register for CAM "
3130 "packets.\n");
3131 break;
3132 }
3133 }
3134 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3135 return status;
3136}
3137
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003138/* Initialize the frame-to-queue routing. */
3139static int ql_route_initialize(struct ql_adapter *qdev)
3140{
3141 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142
3143 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003144 status = ql_clear_routing_entries(qdev);
3145 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003146 return status;
3147
3148 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3149 if (status)
3150 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003151
3152 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3153 if (status) {
3154 QPRINTK(qdev, IFUP, ERR,
3155 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003156 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003157 }
3158 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3159 if (status) {
3160 QPRINTK(qdev, IFUP, ERR,
3161 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003162 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003163 }
3164 /* If we have more than one inbound queue, then turn on RSS in the
3165 * routing block.
3166 */
3167 if (qdev->rss_ring_count > 1) {
3168 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3169 RT_IDX_RSS_MATCH, 1);
3170 if (status) {
3171 QPRINTK(qdev, IFUP, ERR,
3172 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003173 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003174 }
3175 }
3176
3177 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3178 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003179 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003180 QPRINTK(qdev, IFUP, ERR,
3181 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003182exit:
3183 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003184 return status;
3185}
3186
Ron Mercer2ee1e272009-03-03 12:10:33 +00003187int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003188{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003189 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003190
	/* Check if the link is up and use that to
3192 * determine if we are setting or clearing
3193 * the MAC address in the CAM.
3194 */
3195 set = ql_read32(qdev, STS);
3196 set &= qdev->port_link_up;
3197 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003198 if (status) {
3199 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3200 return status;
3201 }
3202
3203 status = ql_route_initialize(qdev);
3204 if (status)
3205 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3206
3207 return status;
3208}
3209
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003210static int ql_adapter_initialize(struct ql_adapter *qdev)
3211{
3212 u32 value, mask;
3213 int i;
3214 int status = 0;
3215
3216 /*
3217 * Set up the System register to halt on errors.
3218 */
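/* Most control registers on this chip appear to use the upper
 * 16 bits of a write as a bit-enable mask for the lower 16 bits,
 * hence the (value << 16) | value pattern used throughout.
 */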
3219 value = SYS_EFE | SYS_FAE;
3220 mask = value << 16;
3221 ql_write32(qdev, SYS, mask | value);
3222
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003223 /* Set the default queue and VLAN behavior. */
3224 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3225 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003226 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3227
3228 /* Set the MPI interrupt to enabled. */
3229 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3230
3231 /* Enable the function, set pagesize, enable error checking. */
3232 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3233 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3234
3235 /* Set/clear header splitting. */
3236 mask = FSC_VM_PAGESIZE_MASK |
3237 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3238 ql_write32(qdev, FSC, mask | value);
3239
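/* Program the split-header threshold; received headers up to this
 * length are presumably DMA'd into the small buffers, separate
 * from the packet payload.
 */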
3240 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3241 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3242
3243 /* Start up the rx queues. */
3244 for (i = 0; i < qdev->rx_ring_count; i++) {
3245 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3246 if (status) {
3247 QPRINTK(qdev, IFUP, ERR,
3248 "Failed to start rx ring[%d].\n", i);
3249 return status;
3250 }
3251 }
3252
3253 /* If there is more than one inbound completion queue
3254 * then download a RICB to configure RSS.
3255 */
3256 if (qdev->rss_ring_count > 1) {
3257 status = ql_start_rss(qdev);
3258 if (status) {
3259 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3260 return status;
3261 }
3262 }
3263
3264 /* Start up the tx queues. */
3265 for (i = 0; i < qdev->tx_ring_count; i++) {
3266 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3267 if (status) {
3268 QPRINTK(qdev, IFUP, ERR,
3269 "Failed to start tx ring[%d].\n", i);
3270 return status;
3271 }
3272 }
3273
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003274 /* Initialize the port and set the max framesize. */
3275 status = qdev->nic_ops->port_initialize(qdev);
3276 if (status) {
3277 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3278 return status;
3279 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003280
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003281 /* Set up the MAC address and frame routing filter. */
3282 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003283 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003284 QPRINTK(qdev, IFUP, ERR,
3285 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003286 return status;
3287 }
3288
3289 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003290 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003291 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003292 i);
3293 napi_enable(&qdev->rx_ring[i].napi);
3294 }
3295
3296 return status;
3297}
3298
3299/* Issue soft reset to chip. */
3300static int ql_adapter_reset(struct ql_adapter *qdev)
3301{
3302 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003303 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003304 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003305
Ron Mercera5f59dc2009-07-02 06:06:07 +00003306 /* Clear all the entries in the routing table. */
3307 status = ql_clear_routing_entries(qdev);
3308 if (status) {
3309 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3310 return status;
3311 }
3312
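/* Issue the function reset below and poll for up to 30 usecs
 * (rounded up to at least one jiffy) for the chip to clear
 * the RST_FO_FR bit.
 */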
3313 end_jiffies = jiffies +
3314 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003315 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003316
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003317 do {
3318 value = ql_read32(qdev, RST_FO);
3319 if ((value & RST_FO_FR) == 0)
3320 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003321 cpu_relax();
3322 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003323
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003324 if (value & RST_FO_FR) {
3325 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003326 "Timed out waiting for chip reset to complete.\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003327 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003328 }
3329
3330 return status;
3331}
3332
3333static void ql_display_dev_info(struct net_device *ndev)
3334{
3335 struct ql_adapter *qdev = netdev_priv(ndev);
3336
3337 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003338 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003339 "XG Roll = %d, XG Rev = %d.\n",
3340 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003341 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003342 qdev->chip_rev_id & 0x0000000f,
3343 qdev->chip_rev_id >> 4 & 0x0000000f,
3344 qdev->chip_rev_id >> 8 & 0x0000000f,
3345 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003346 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003347}
3348
3349static int ql_adapter_down(struct ql_adapter *qdev)
3350{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003351 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003352
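/* Teardown order: drop the link, stop the worker threads, quiesce
 * NAPI and interrupts, drain the tx rings, free the rx buffers,
 * and finally reset the chip.
 */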
Ron Mercer6a473302009-07-02 06:06:12 +00003353 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003354
Ron Mercer6497b602009-02-12 16:37:13 -08003355 /* Don't kill the reset worker thread if we
3356 * are in the process of recovery.
3357 */
3358 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3359 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003360 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3361 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003362 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003363 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003364
Ron Mercer39aa8162009-08-27 11:02:11 +00003365 for (i = 0; i < qdev->rss_ring_count; i++)
3366 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003367
3368 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3369
3370 ql_disable_interrupts(qdev);
3371
3372 ql_tx_ring_clean(qdev);
3373
Ron Mercer6b318cb2009-03-09 10:59:26 +00003374 /* Call netif_napi_del() from a common point.
3375 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003376 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003377 netif_napi_del(&qdev->rx_ring[i].napi);
3378
Ron Mercer4545a3f2009-02-23 10:42:17 +00003379 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003380
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003381 status = ql_adapter_reset(qdev);
3382 if (status)
3383 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3384 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003385 return status;
3386}
3387
3388static int ql_adapter_up(struct ql_adapter *qdev)
3389{
3390 int err = 0;
3391
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003392 err = ql_adapter_initialize(qdev);
3393 if (err) {
3394 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003395 goto err_init;
3396 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003397 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003398 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003399 /* If the port is initialized and the
3400 * link is up, then turn on the carrier.
3401 */
3402 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3403 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003404 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003405 ql_enable_interrupts(qdev);
3406 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003407 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003408
3409 return 0;
3410err_init:
3411 ql_adapter_reset(qdev);
3412 return err;
3413}
3414
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003415static void ql_release_adapter_resources(struct ql_adapter *qdev)
3416{
3417 ql_free_mem_resources(qdev);
3418 ql_free_irq(qdev);
3419}
3420
3421static int ql_get_adapter_resources(struct ql_adapter *qdev)
3422{
3423 int status = 0;
3424
3425 if (ql_alloc_mem_resources(qdev)) {
3426 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3427 return -ENOMEM;
3428 }
3429 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003430 return status;
3431}
3432
3433static int qlge_close(struct net_device *ndev)
3434{
3435 struct ql_adapter *qdev = netdev_priv(ndev);
3436
3437 /*
3438 * Wait for device to recover from a reset.
3439 * (Rarely happens, but possible.)
3440 */
3441 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3442 msleep(1);
3443 ql_adapter_down(qdev);
3444 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003445 return 0;
3446}
3447
3448static int ql_configure_rings(struct ql_adapter *qdev)
3449{
3450 int i;
3451 struct rx_ring *rx_ring;
3452 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003453 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003454
Ron Mercera4ab6132009-08-27 11:02:10 +00003455 /* In a perfect world we have one RSS ring for each CPU
3456 * and each has its own vector. To do that we ask for
3457 * cpu_cnt vectors. ql_enable_msix() will adjust the
3458 * vector count to what we actually get. We then
3459 * allocate an RSS ring for each.
3460 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003461 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003462 qdev->intr_count = cpu_cnt;
3463 ql_enable_msix(qdev);
3464 /* Adjust the RSS ring count to the actual vector count. */
3465 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003466 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003467 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
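/* Ring layout: rx_ring[0..rss_ring_count) are inbound (RSS)
 * queues; the remaining entries service only outbound completions
 * for their corresponding tx rings.
 */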
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003468
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003469 for (i = 0; i < qdev->tx_ring_count; i++) {
3470 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003471 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003472 tx_ring->qdev = qdev;
3473 tx_ring->wq_id = i;
3474 tx_ring->wq_len = qdev->tx_ring_size;
3475 tx_ring->wq_size =
3476 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3477
3478 /*
3479 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003480 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003481 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003482 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003483 }
3484
3485 for (i = 0; i < qdev->rx_ring_count; i++) {
3486 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003487 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003488 rx_ring->qdev = qdev;
3489 rx_ring->cq_id = i;
3490 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003491 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003492 /*
3493 * Inbound (RSS) queues.
3494 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003495 rx_ring->cq_len = qdev->rx_ring_size;
3496 rx_ring->cq_size =
3497 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3498 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3499 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003500 rx_ring->lbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003501 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3502 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3503 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003504 rx_ring->sbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003505 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003506 rx_ring->type = RX_Q;
3507 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003508 /*
3509 * Outbound queue handles outbound completions only.
3510 */
3511 /* outbound cq is same size as tx_ring it services. */
3512 rx_ring->cq_len = qdev->tx_ring_size;
3513 rx_ring->cq_size =
3514 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3515 rx_ring->lbq_len = 0;
3516 rx_ring->lbq_size = 0;
3517 rx_ring->lbq_buf_size = 0;
3518 rx_ring->sbq_len = 0;
3519 rx_ring->sbq_size = 0;
3520 rx_ring->sbq_buf_size = 0;
3521 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003522 }
3523 }
3524 return 0;
3525}
3526
3527static int qlge_open(struct net_device *ndev)
3528{
3529 int err = 0;
3530 struct ql_adapter *qdev = netdev_priv(ndev);
3531
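/* Size the rings to the CPU/vector count first, then allocate
 * memory and IRQs, and finally bring the hardware up.
 */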
3532 err = ql_configure_rings(qdev);
3533 if (err)
3534 return err;
3535
3536 err = ql_get_adapter_resources(qdev);
3537 if (err)
3538 goto error_up;
3539
3540 err = ql_adapter_up(qdev);
3541 if (err)
3542 goto error_up;
3543
3544 return err;
3545
3546error_up:
3547 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003548 return err;
3549}
3550
3551static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3552{
3553 struct ql_adapter *qdev = netdev_priv(ndev);
3554
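/* Only the standard (1500) and jumbo (9000) MTUs are supported.
 * A move to jumbo frames is handed to the MPI port-config worker
 * so the firmware can reconfigure the port.
 */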
3555 if (ndev->mtu == 1500 && new_mtu == 9000) {
3556 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003557 queue_delayed_work(qdev->workqueue,
3558 &qdev->mpi_port_cfg_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3560 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3561 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3562 (ndev->mtu == 9000 && new_mtu == 9000)) {
3563 return 0;
3564 } else
3565 return -EINVAL;
3566 ndev->mtu = new_mtu;
3567 return 0;
3568}
3569
3570 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
3572{
Ajit Khapardebcc90f52009-10-07 02:46:09 +00003573 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003574}
3575
3576static void qlge_set_multicast_list(struct net_device *ndev)
3577{
3578 struct ql_adapter *qdev = netdev_priv(ndev);
3579 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003580 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003581
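/* Hold the routing-index semaphore across all of the routing
 * register updates below; it arbitrates access with the MPI
 * firmware.
 */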
Ron Mercercc288f52009-02-23 10:42:14 +00003582 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3583 if (status)
3584 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003585 /*
3586 * Set or clear promiscuous mode if a
3587 * transition is taking place.
3588 */
3589 if (ndev->flags & IFF_PROMISC) {
3590 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3591 if (ql_set_routing_reg
3592 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3593 QPRINTK(qdev, HW, ERR,
3594 "Failed to set promiscous mode.\n");
3595 } else {
3596 set_bit(QL_PROMISCUOUS, &qdev->flags);
3597 }
3598 }
3599 } else {
3600 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3601 if (ql_set_routing_reg
3602 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3603 QPRINTK(qdev, HW, ERR,
3604 "Failed to clear promiscous mode.\n");
3605 } else {
3606 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3607 }
3608 }
3609 }
3610
3611 /*
3612 * Set or clear all multicast mode if a
3613 * transition is taking place.
3614 */
3615 if ((ndev->flags & IFF_ALLMULTI) ||
3616 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3617 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3618 if (ql_set_routing_reg
3619 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3620 QPRINTK(qdev, HW, ERR,
3621 "Failed to set all-multi mode.\n");
3622 } else {
3623 set_bit(QL_ALLMULTI, &qdev->flags);
3624 }
3625 }
3626 } else {
3627 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3628 if (ql_set_routing_reg
3629 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3630 QPRINTK(qdev, HW, ERR,
3631 "Failed to clear all-multi mode.\n");
3632 } else {
3633 clear_bit(QL_ALLMULTI, &qdev->flags);
3634 }
3635 }
3636 }
3637
3638 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003639 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3640 if (status)
3641 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003642 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3643 i++, mc_ptr = mc_ptr->next)
3644 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3645 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3646 QPRINTK(qdev, HW, ERR,
3647 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003648 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003649 goto exit;
3650 }
Ron Mercercc288f52009-02-23 10:42:14 +00003651 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003652 if (ql_set_routing_reg
3653 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3654 QPRINTK(qdev, HW, ERR,
3655 "Failed to set multicast match mode.\n");
3656 } else {
3657 set_bit(QL_ALLMULTI, &qdev->flags);
3658 }
3659 }
3660exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00003661 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003662}
3663
3664static int qlge_set_mac_address(struct net_device *ndev, void *p)
3665{
3666 struct ql_adapter *qdev = netdev_priv(ndev);
3667 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003668 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003669
3670 if (netif_running(ndev))
3671 return -EBUSY;
3672
3673 if (!is_valid_ether_addr(addr->sa_data))
3674 return -EADDRNOTAVAIL;
3675 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3676
Ron Mercercc288f52009-02-23 10:42:14 +00003677 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3678 if (status)
3679 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00003680 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3681 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00003682 if (status)
3683 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3684 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3685 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003686}
3687
3688static void qlge_tx_timeout(struct net_device *ndev)
3689{
3690 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08003691 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003692}
3693
3694static void ql_asic_reset_work(struct work_struct *work)
3695{
3696 struct ql_adapter *qdev =
3697 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00003698 int status;
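/* Take the RTNL because the down/up cycle below touches the same
 * state as the ndo_stop/ndo_open paths.
 */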
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003699 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00003700 status = ql_adapter_down(qdev);
3701 if (status)
3702 goto error;
3703
3704 status = ql_adapter_up(qdev);
3705 if (status)
3706 goto error;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003707 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00003708 return;
3709error:
3710 QPRINTK(qdev, IFUP, ALERT,
3711 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003712
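/* Restore the UP flag so qlge_close() does not spin waiting for
 * a recovery that will never complete.
 */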
Ron Mercerdb988122009-03-09 10:59:17 +00003713 set_bit(QL_ADAPTER_UP, &qdev->flags);
3714 dev_close(qdev->ndev);
3715 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003716}
3717
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003718static struct nic_operations qla8012_nic_ops = {
3719 .get_flash = ql_get_8012_flash_params,
3720 .port_initialize = ql_8012_port_initialize,
3721};
3722
Ron Mercercdca8d02009-03-02 08:07:31 +00003723static struct nic_operations qla8000_nic_ops = {
3724 .get_flash = ql_get_8000_flash_params,
3725 .port_initialize = ql_8000_port_initialize,
3726};
3727
Ron Mercere4552f52009-06-09 05:39:32 +00003728 /* Find the PCIe function number for the other NIC
3729 * on this chip. Since both NIC functions share a
3730 * common firmware we have the lowest enabled function
3731 * do any common work. Examples would be resetting
3732 * after a fatal firmware error, or doing a firmware
3733 * coredump.
3734 */
3735static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003736{
Ron Mercere4552f52009-06-09 05:39:32 +00003737 int status = 0;
3738 u32 temp;
3739 u32 nic_func1, nic_func2;
3740
3741 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3742 &temp);
3743 if (status)
3744 return status;
3745
3746 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3747 MPI_TEST_NIC_FUNC_MASK);
3748 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3749 MPI_TEST_NIC_FUNC_MASK);
3750
3751 if (qdev->func == nic_func1)
3752 qdev->alt_func = nic_func2;
3753 else if (qdev->func == nic_func2)
3754 qdev->alt_func = nic_func1;
3755 else
3756 status = -EIO;
3757
3758 return status;
3759}
3760
3761static int ql_get_board_info(struct ql_adapter *qdev)
3762{
3763 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003764 qdev->func =
3765 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00003766 if (qdev->func > 3)
3767 return -EIO;
3768
3769 status = ql_get_alt_pcie_func(qdev);
3770 if (status)
3771 return status;
3772
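/* The lower-numbered NIC function owns port 0; the port selects
 * the per-port semaphore, status bits and mailbox addresses below.
 */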
3773 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3774 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003775 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3776 qdev->port_link_up = STS_PL1;
3777 qdev->port_init = STS_PI1;
3778 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3779 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3780 } else {
3781 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3782 qdev->port_link_up = STS_PL0;
3783 qdev->port_init = STS_PI0;
3784 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3785 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3786 }
3787 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003788 qdev->device_id = qdev->pdev->device;
3789 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3790 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00003791 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3792 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00003793 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003794}
3795
3796static void ql_release_all(struct pci_dev *pdev)
3797{
3798 struct net_device *ndev = pci_get_drvdata(pdev);
3799 struct ql_adapter *qdev = netdev_priv(ndev);
3800
3801 if (qdev->workqueue) {
3802 destroy_workqueue(qdev->workqueue);
3803 qdev->workqueue = NULL;
3804 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003805
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003806 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003807 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 if (qdev->doorbell_area)
3809 iounmap(qdev->doorbell_area);
3810 pci_release_regions(pdev);
3811 pci_set_drvdata(pdev, NULL);
3812}
3813
3814static int __devinit ql_init_device(struct pci_dev *pdev,
3815 struct net_device *ndev, int cards_found)
3816{
3817 struct ql_adapter *qdev = netdev_priv(ndev);
3818 int pos, err = 0;
3819 u16 val16;
3820
Ron Mercere3324712009-07-02 06:06:13 +00003821 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003822 err = pci_enable_device(pdev);
3823 if (err) {
3824 dev_err(&pdev->dev, "PCI device enable failed.\n");
3825 return err;
3826 }
3827
Ron Mercerebd6e772009-09-29 08:39:25 +00003828 qdev->ndev = ndev;
3829 qdev->pdev = pdev;
3830 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003831 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3832 if (pos <= 0) {
3833 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3834 "aborting.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00003835 return -EINVAL;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003836 } else {
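/* Disable no-snoop and enable reporting of correctable,
 * non-fatal, fatal and unsupported-request errors.
 */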
3837 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3838 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3839 val16 |= (PCI_EXP_DEVCTL_CERE |
3840 PCI_EXP_DEVCTL_NFERE |
3841 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3842 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3843 }
3844
3845 err = pci_request_regions(pdev, DRV_NAME);
3846 if (err) {
3847 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00003848 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003849 }
3850
3851 pci_set_master(pdev);
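/* Prefer 64-bit DMA, remembering it in QL_DMA64 (used later to
 * set NETIF_F_HIGHDMA); otherwise fall back to a 32-bit mask.
 */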
Yang Hongyang6a355282009-04-06 19:01:13 -07003852 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003853 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07003854 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003855 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07003856 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003857 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07003858 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003859 }
3860
3861 if (err) {
3862 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3863 goto err_out;
3864 }
3865
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003866 qdev->reg_base =
3867 ioremap_nocache(pci_resource_start(pdev, 1),
3868 pci_resource_len(pdev, 1));
3869 if (!qdev->reg_base) {
3870 dev_err(&pdev->dev, "Register mapping failed.\n");
3871 err = -ENOMEM;
3872 goto err_out;
3873 }
3874
3875 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3876 qdev->doorbell_area =
3877 ioremap_nocache(pci_resource_start(pdev, 3),
3878 pci_resource_len(pdev, 3));
3879 if (!qdev->doorbell_area) {
3880 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3881 err = -ENOMEM;
3882 goto err_out;
3883 }
3884
Ron Mercere4552f52009-06-09 05:39:32 +00003885 err = ql_get_board_info(qdev);
3886 if (err) {
3887 dev_err(&pdev->dev, "Register access failed.\n");
3888 err = -EIO;
3889 goto err_out;
3890 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003891 qdev->msg_enable = netif_msg_init(debug, default_msg);
3892 spin_lock_init(&qdev->hw_lock);
3893 spin_lock_init(&qdev->stats_lock);
3894
3895 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003896 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003897 if (err) {
3898 dev_err(&pdev->dev, "Invalid FLASH.\n");
3899 goto err_out;
3900 }
3901
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003902 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3903
3904 /* Set up the default ring sizes. */
3905 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3906 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3907
3908 /* Set up the coalescing parameters. */
3909 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3910 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3911 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3912 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3913
3914 /*
3915 * Set up the operating parameters.
3916 */
3917 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003918 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3919 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3920 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3921 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003922 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003923 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003924 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003925
3926 if (!cards_found) {
3927 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3928 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3929 DRV_NAME, DRV_VERSION);
3930 }
3931 return 0;
3932err_out:
3933 ql_release_all(pdev);
3934 pci_disable_device(pdev);
3935 return err;
3936}
3937
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003938
3939static const struct net_device_ops qlge_netdev_ops = {
3940 .ndo_open = qlge_open,
3941 .ndo_stop = qlge_close,
3942 .ndo_start_xmit = qlge_send,
3943 .ndo_change_mtu = qlge_change_mtu,
3944 .ndo_get_stats = qlge_get_stats,
3945 .ndo_set_multicast_list = qlge_set_multicast_list,
3946 .ndo_set_mac_address = qlge_set_mac_address,
3947 .ndo_validate_addr = eth_validate_addr,
3948 .ndo_tx_timeout = qlge_tx_timeout,
3949 .ndo_vlan_rx_register = ql_vlan_rx_register,
3950 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3951 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3952};
3953
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954static int __devinit qlge_probe(struct pci_dev *pdev,
3955 const struct pci_device_id *pci_entry)
3956{
3957 struct net_device *ndev = NULL;
3958 struct ql_adapter *qdev = NULL;
3959 static int cards_found;
3960 int err = 0;
3961
Ron Mercer1e213302009-03-09 10:59:21 +00003962 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
3963 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003964 if (!ndev)
3965 return -ENOMEM;
3966
3967 err = ql_init_device(pdev, ndev, cards_found);
3968 if (err < 0) {
3969 free_netdev(ndev);
3970 return err;
3971 }
3972
3973 qdev = netdev_priv(ndev);
3974 SET_NETDEV_DEV(ndev, &pdev->dev);
3975 ndev->features = (0
3976 | NETIF_F_IP_CSUM
3977 | NETIF_F_SG
3978 | NETIF_F_TSO
3979 | NETIF_F_TSO6
3980 | NETIF_F_TSO_ECN
3981 | NETIF_F_HW_VLAN_TX
3982 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00003983 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003984
3985 if (test_bit(QL_DMA64, &qdev->flags))
3986 ndev->features |= NETIF_F_HIGHDMA;
3987
3988 /*
3989 * Set up net_device structure.
3990 */
3991 ndev->tx_queue_len = qdev->tx_ring_size;
3992 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003993
3994 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003995 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003996 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003997
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003998 err = register_netdev(ndev);
3999 if (err) {
4000 dev_err(&pdev->dev, "net device registration failed.\n");
4001 ql_release_all(pdev);
4002 pci_disable_device(pdev);
4003 return err;
4004 }
Ron Mercer6a473302009-07-02 06:06:12 +00004005 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004006 ql_display_dev_info(ndev);
4007 cards_found++;
4008 return 0;
4009}
4010
4011static void __devexit qlge_remove(struct pci_dev *pdev)
4012{
4013 struct net_device *ndev = pci_get_drvdata(pdev);
4014 unregister_netdev(ndev);
4015 ql_release_all(pdev);
4016 pci_disable_device(pdev);
4017 free_netdev(ndev);
4018}
4019
4020/*
4021 * This callback is called by the PCI subsystem whenever
4022 * a PCI bus error is detected.
4023 */
4024static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4025 enum pci_channel_state state)
4026{
4027 struct net_device *ndev = pci_get_drvdata(pdev);
4028 struct ql_adapter *qdev = netdev_priv(ndev);
4029
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004030 netif_device_detach(ndev);
4031
4032 if (state == pci_channel_io_perm_failure)
4033 return PCI_ERS_RESULT_DISCONNECT;
4034
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004035 if (netif_running(ndev))
4036 ql_adapter_down(qdev);
4037
4038 pci_disable_device(pdev);
4039
4040 /* Request a slot reset. */
4041 return PCI_ERS_RESULT_NEED_RESET;
4042}
4043
4044/*
4045 * This callback is called after the PCI bus has been reset.
4046 * Basically, this tries to restart the card from scratch.
4047 * This is a shortened version of the device probe/discovery code;
4048 * it resembles the first half of the () routine.
4049 */
4050static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4051{
4052 struct net_device *ndev = pci_get_drvdata(pdev);
4053 struct ql_adapter *qdev = netdev_priv(ndev);
4054
4055 if (pci_enable_device(pdev)) {
4056 QPRINTK(qdev, IFUP, ERR,
4057 "Cannot re-enable PCI device after reset.\n");
4058 return PCI_ERS_RESULT_DISCONNECT;
4059 }
4060
4061 pci_set_master(pdev);
4062
4063 netif_carrier_off(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004064 ql_adapter_reset(qdev);
4065
4066 /* Make sure the EEPROM is good */
4067 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4068
4069 if (!is_valid_ether_addr(ndev->perm_addr)) {
4070 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4071 return PCI_ERS_RESULT_DISCONNECT;
4072 }
4073
4074 return PCI_ERS_RESULT_RECOVERED;
4075}
4076
4077static void qlge_io_resume(struct pci_dev *pdev)
4078{
4079 struct net_device *ndev = pci_get_drvdata(pdev);
4080 struct ql_adapter *qdev = netdev_priv(ndev);
4081
4082 pci_set_master(pdev);
4083
4084 if (netif_running(ndev)) {
4085 if (ql_adapter_up(qdev)) {
4086 QPRINTK(qdev, IFUP, ERR,
4087 "Device initialization failed after reset.\n");
4088 return;
4089 }
4090 }
4091
4092 netif_device_attach(ndev);
4093}
4094
4095static struct pci_error_handlers qlge_err_handler = {
4096 .error_detected = qlge_io_error_detected,
4097 .slot_reset = qlge_io_slot_reset,
4098 .resume = qlge_io_resume,
4099};
4100
4101static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4102{
4103 struct net_device *ndev = pci_get_drvdata(pdev);
4104 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004105 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004106
4107 netif_device_detach(ndev);
4108
4109 if (netif_running(ndev)) {
4110 err = ql_adapter_down(qdev);
4111 if (err)
4112 return err;
4113 }
4114
4115 err = pci_save_state(pdev);
4116 if (err)
4117 return err;
4118
4119 pci_disable_device(pdev);
4120
4121 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4122
4123 return 0;
4124}
4125
David S. Miller04da2cf2008-09-19 16:14:24 -07004126#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004127static int qlge_resume(struct pci_dev *pdev)
4128{
4129 struct net_device *ndev = pci_get_drvdata(pdev);
4130 struct ql_adapter *qdev = netdev_priv(ndev);
4131 int err;
4132
4133 pci_set_power_state(pdev, PCI_D0);
4134 pci_restore_state(pdev);
4135 err = pci_enable_device(pdev);
4136 if (err) {
4137 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4138 return err;
4139 }
4140 pci_set_master(pdev);
4141
4142 pci_enable_wake(pdev, PCI_D3hot, 0);
4143 pci_enable_wake(pdev, PCI_D3cold, 0);
4144
4145 if (netif_running(ndev)) {
4146 err = ql_adapter_up(qdev);
4147 if (err)
4148 return err;
4149 }
4150
4151 netif_device_attach(ndev);
4152
4153 return 0;
4154}
David S. Miller04da2cf2008-09-19 16:14:24 -07004155#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004156
4157static void qlge_shutdown(struct pci_dev *pdev)
4158{
4159 qlge_suspend(pdev, PMSG_SUSPEND);
4160}
4161
4162static struct pci_driver qlge_driver = {
4163 .name = DRV_NAME,
4164 .id_table = qlge_pci_tbl,
4165 .probe = qlge_probe,
4166 .remove = __devexit_p(qlge_remove),
4167#ifdef CONFIG_PM
4168 .suspend = qlge_suspend,
4169 .resume = qlge_resume,
4170#endif
4171 .shutdown = qlge_shutdown,
4172 .err_handler = &qlge_err_handler
4173};
4174
4175static int __init qlge_init_module(void)
4176{
4177 return pci_register_driver(&qlge_driver);
4178}
4179
4180static void __exit qlge_exit(void)
4181{
4182 pci_unregister_driver(&qlge_driver);
4183}
4184
4185module_init(qlge_init_module);
4186module_exit(qlge_exit);