1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85/* This hardware semaphore causes exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
119 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
120 return -EINVAL;
121 }
122
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
129 unsigned int wait_count = 30;
130 do {
131 if (!ql_sem_trylock(qdev, sem_mask))
132 return 0;
133 udelay(100);
134 } while (--wait_count);
135 return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
143
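/* Typical usage of the semaphore helpers above (illustrative sketch only):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the shared flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times with a
 * 100us delay between attempts, so a semaphore held by the MPI/FC
 * firmware costs roughly 3ms before the caller sees -ETIMEDOUT.
 */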
144/* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialize
146 * process, but is also used in kernel thread API such as
147 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
148 */
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
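/* Return convention for ql_wait_reg_rdy(): 0 once 'bit' is set, -EIO if
 * 'err_bit' comes up first, -ETIMEDOUT after UDELAY_COUNT polls of
 * UDELAY_DELAY microseconds each.  A representative call, taken from the
 * flash access path further down:
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
 *				 FLASH_ADDR_ERR);
 */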
172
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
216 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
217 if (status)
218 return status;
219
220 status = ql_wait_cfg(qdev, bit);
221 if (status) {
222 QPRINTK(qdev, IFUP, ERR,
223 "Timed out waiting for CFG to come ready.\n");
224 goto exit;
225 }
226
227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229
230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
239 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
240 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
243
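/* ql_write_cfg() download sequence, in order: map the control block for
 * DMA (to-device for CFG_LRQ/CFG_LR/CFG_LCQ loads, from-device otherwise),
 * grab the ICB hardware semaphore, wait for any previous CFG operation to
 * drain, program ICB_L/ICB_H with the DMA address, kick CFG with the queue
 * id, then wait for the bit to clear before unlocking and unmapping.
 */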
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
251 switch (type) {
252 case MAC_ADDR_TYPE_MULTI_MAC:
253 case MAC_ADDR_TYPE_CAM_MAC:
254 {
255 status =
256 ql_wait_reg_rdy(qdev,
257 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
258 if (status)
259 goto exit;
260 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261 (index << MAC_ADDR_IDX_SHIFT) | /* index */
262 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263 status =
264 ql_wait_reg_rdy(qdev,
265 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
266 if (status)
267 goto exit;
268 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269 status =
270 ql_wait_reg_rdy(qdev,
271 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272 if (status)
273 goto exit;
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277 status =
278 ql_wait_reg_rdy(qdev,
279 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280 if (status)
281 goto exit;
282 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 if (type == MAC_ADDR_TYPE_CAM_MAC) {
284 status =
285 ql_wait_reg_rdy(qdev,
286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294 MAC_ADDR_MR, 0);
295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 }
299 break;
300 }
301 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default:
304 QPRINTK(qdev, IFUP, CRIT,
305 "Address type %d not yet supported.\n", type);
306 status = -EPERM;
307 }
308exit:
309 return status;
310}
311
312/* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
314 */
315static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316 u16 index)
317{
318 u32 offset = 0;
319 int status = 0;
320
321 switch (type) {
322 case MAC_ADDR_TYPE_MULTI_MAC:
323 case MAC_ADDR_TYPE_CAM_MAC:
324 {
325 u32 cam_output;
326 u32 upper = (addr[0] << 8) | addr[1];
327 u32 lower =
328 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
329 (addr[5]);
330
331 QPRINTK(qdev, IFUP, DEBUG,
332 "Adding %s address %pM"
333 " at index %d in the CAM.\n",
334 ((type ==
335 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
336 "UNICAST"), addr, index);
337
338 status =
339 ql_wait_reg_rdy(qdev,
340 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
341 if (status)
342 goto exit;
343 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
344 (index << MAC_ADDR_IDX_SHIFT) | /* index */
345 type); /* type */
346 ql_write32(qdev, MAC_ADDR_DATA, lower);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
353 (index << MAC_ADDR_IDX_SHIFT) | /* index */
354 type); /* type */
355 ql_write32(qdev, MAC_ADDR_DATA, upper);
356 status =
357 ql_wait_reg_rdy(qdev,
358 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 if (status)
360 goto exit;
361 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
362 (index << MAC_ADDR_IDX_SHIFT) | /* index */
363 type); /* type */
364 /* This field should also include the queue id
365 and possibly the function id. Right now we hardcode
366 the route field to NIC core.
367 */
368 if (type == MAC_ADDR_TYPE_CAM_MAC) {
369 cam_output = (CAM_OUT_ROUTE_NIC |
370 (qdev->
371 func << CAM_OUT_FUNC_SHIFT) |
372 (0 << CAM_OUT_CQ_ID_SHIFT));
373 if (qdev->vlgrp)
374 cam_output |= CAM_OUT_RV;
375 /* route to NIC core */
376 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
377 }
378 break;
379 }
380 case MAC_ADDR_TYPE_VLAN:
381 {
382 u32 enable_bit = *((u32 *) &addr[0]);
383 /* For VLAN, the addr actually holds a bit that
384 * either enables or disables the vlan id we are
385 * addressing. It's either MAC_ADDR_E on or off.
386 * That's bit-27 we're talking about.
387 */
388 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
389 (enable_bit ? "Adding" : "Removing"),
390 index, (enable_bit ? "to" : "from"));
391
392 status =
393 ql_wait_reg_rdy(qdev,
394 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
395 if (status)
396 goto exit;
397 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
398 (index << MAC_ADDR_IDX_SHIFT) | /* index */
399 type | /* type */
400 enable_bit); /* enable/disable */
401 break;
402 }
403 case MAC_ADDR_TYPE_MULTI_FLTR:
404 default:
405 QPRINTK(qdev, IFUP, CRIT,
406 "Address type %d not yet supported.\n", type);
407 status = -EPERM;
408 }
409exit:
410 return status;
411}
412
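/* A CAM MAC entry written by ql_set_mac_addr_reg() spans three
 * MAC_ADDR_DATA writes: the low 32 bits of the address, the high 16 bits,
 * and a cam_output word that routes hits to the NIC core (CAM_OUT_ROUTE_NIC)
 * for this function, with CAM_OUT_RV set when a VLAN group is active.
 */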
413/* Set or clear MAC address in hardware. We sometimes
414 * have to clear it to prevent wrong frame routing
415 * especially in a bonding environment.
416 */
417static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
418{
419 int status;
420 char zero_mac_addr[ETH_ALEN];
421 char *addr;
422
423 if (set) {
424 addr = &qdev->ndev->dev_addr[0];
425 QPRINTK(qdev, IFUP, DEBUG,
426 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
427 addr[0], addr[1], addr[2], addr[3],
428 addr[4], addr[5]);
429 } else {
430 memset(zero_mac_addr, 0, ETH_ALEN);
431 addr = &zero_mac_addr[0];
432 QPRINTK(qdev, IFUP, DEBUG,
433 "Clearing MAC address on %s\n",
434 qdev->ndev->name);
435 }
436 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
437 if (status)
438 return status;
439 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
440 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
441 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
442 if (status)
443 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
444 "address.\n");
445 return status;
446}
447
448void ql_link_on(struct ql_adapter *qdev)
449{
450 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
451 qdev->ndev->name);
452 netif_carrier_on(qdev->ndev);
453 ql_set_mac_addr(qdev, 1);
454}
455
456void ql_link_off(struct ql_adapter *qdev)
457{
458 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
459 qdev->ndev->name);
460 netif_carrier_off(qdev->ndev);
461 ql_set_mac_addr(qdev, 0);
462}
463
464/* Get a specific frame routing value from the CAM.
465 * Used for debug and reg dump.
466 */
467int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
468{
469 int status = 0;
470
471 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
472 if (status)
473 goto exit;
474
475 ql_write32(qdev, RT_IDX,
476 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
477 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
478 if (status)
479 goto exit;
480 *value = ql_read32(qdev, RT_DATA);
481exit:
482 return status;
483}
484
485/* The NIC function for this chip has 16 routing indexes. Each one can be used
486 * to route different frame types to various inbound queues. We send broadcast/
487 * multicast/error frames to the default queue for slow handling,
488 * and CAM hit/RSS frames to the fast handling queues.
489 */
490static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
491 int enable)
492{
493 int status = -EINVAL; /* Return error if no mask match. */
494 u32 value = 0;
495
496 QPRINTK(qdev, IFUP, DEBUG,
497 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
498 (enable ? "Adding" : "Removing"),
499 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
500 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
501 ((index ==
502 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
503 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
504 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
505 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
506 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
507 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
508 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
509 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
510 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
511 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
512 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
513 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
514 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
515 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
516 (enable ? "to" : "from"));
517
518 switch (mask) {
519 case RT_IDX_CAM_HIT:
520 {
521 value = RT_IDX_DST_CAM_Q | /* dest */
522 RT_IDX_TYPE_NICQ | /* type */
523 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
524 break;
525 }
526 case RT_IDX_VALID: /* Promiscuous Mode frames. */
527 {
528 value = RT_IDX_DST_DFLT_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 break;
532 }
533 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
534 {
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 break;
539 }
540 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
541 {
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 break;
546 }
547 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
548 {
549 value = RT_IDX_DST_CAM_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
552 break;
553 }
554 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
555 {
556 value = RT_IDX_DST_CAM_Q | /* dest */
557 RT_IDX_TYPE_NICQ | /* type */
558 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
559 break;
560 }
561 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
562 {
563 value = RT_IDX_DST_RSS | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 break;
567 }
568 case 0: /* Clear the E-bit on an entry. */
569 {
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (index << RT_IDX_IDX_SHIFT);/* index */
573 break;
574 }
575 default:
576 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
577 mask);
578 status = -EPERM;
579 goto exit;
580 }
581
582 if (value) {
583 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
584 if (status)
585 goto exit;
586 value |= (enable ? RT_IDX_E : 0);
587 ql_write32(qdev, RT_IDX, value);
588 ql_write32(qdev, RT_DATA, enable ? mask : 0);
589 }
590exit:
591 return status;
592}
593
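/* Summary of the routing slots programmed above: error, broadcast and
 * promiscuous traffic land in the default (slow path) queue, multicast
 * frames and CAM hits are steered to the CAM queue, and RSS matches go
 * to the RSS queue set.  Passing mask == 0 simply clears the enable bit
 * on an existing slot.
 */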
594static void ql_enable_interrupts(struct ql_adapter *qdev)
595{
596 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
597}
598
599static void ql_disable_interrupts(struct ql_adapter *qdev)
600{
601 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
602}
603
604/* If we're running with multiple MSI-X vectors then we enable on the fly.
605 * Otherwise, we may have multiple outstanding workers and don't want to
606 * enable until the last one finishes. In this case, the irq_cnt gets
607 * incremented every time we queue a worker and decremented every time
608 * a worker finishes. Once it hits zero we enable the interrupt.
609 */
610u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
611{
612 u32 var = 0;
613 unsigned long hw_flags = 0;
614 struct intr_context *ctx = qdev->intr_context + intr;
615
616 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
617 /* Always enable if we're MSIX multi interrupts and
618 * it's not the default (zeroth) interrupt.
619 */
620 ql_write32(qdev, INTR_EN,
621 ctx->intr_en_mask);
622 var = ql_read32(qdev, STS);
623 return var;
624 }
625
626 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
627 if (atomic_dec_and_test(&ctx->irq_cnt)) {
628 ql_write32(qdev, INTR_EN,
629 ctx->intr_en_mask);
630 var = ql_read32(qdev, STS);
631 }
632 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
633 return var;
634}
635
636static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
637{
638 u32 var = 0;
639 struct intr_context *ctx;
640
641 /* HW disables for us if we're MSIX multi interrupts and
642 * it's not the default (zeroth) interrupt.
643 */
644 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
645 return 0;
646
647 ctx = qdev->intr_context + intr;
648 spin_lock(&qdev->hw_lock);
649 if (!atomic_read(&ctx->irq_cnt)) {
650 ql_write32(qdev, INTR_EN,
651 ctx->intr_dis_mask);
652 var = ql_read32(qdev, STS);
653 }
654 atomic_inc(&ctx->irq_cnt);
655 spin_unlock(&qdev->hw_lock);
656 return var;
657}
658
659static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
660{
661 int i;
662 for (i = 0; i < qdev->intr_count; i++) {
663 /* The enable call does an atomic_dec_and_test
664 * and enables only if the result is zero.
665 * So we precharge it here.
666 */
667 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
668 i == 0))
669 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
670 ql_enable_completion_interrupt(qdev, i);
671 }
672
673}
674
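/* The irq_cnt scheme in the helpers above: for legacy/MSI (or vector 0)
 * the counter is precharged to 1, each ql_disable_completion_interrupt()
 * increments it, and ql_enable_completion_interrupt() only re-enables the
 * hardware interrupt once atomic_dec_and_test() brings it back to zero,
 * so nested workers cannot re-enable the interrupt early.
 */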
675static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
676{
677 int status, i;
678 u16 csum = 0;
679 __le16 *flash = (__le16 *)&qdev->flash;
680
681 status = strncmp((char *)&qdev->flash, str, 4);
682 if (status) {
683 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
684 return status;
685 }
686
687 for (i = 0; i < size; i++)
688 csum += le16_to_cpu(*flash++);
689
690 if (csum)
691 QPRINTK(qdev, IFUP, ERR,
692 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
693
694 return csum;
695}
696
697static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
698{
699 int status = 0;
700 /* wait for reg to come ready */
701 status = ql_wait_reg_rdy(qdev,
702 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
703 if (status)
704 goto exit;
705 /* set up for reg read */
706 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
707 /* wait for reg to come ready */
708 status = ql_wait_reg_rdy(qdev,
709 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
710 if (status)
711 goto exit;
712 /* This data is stored on flash as an array of
713 * __le32. Since ql_read32() returns cpu endian
714 * we need to swap it back.
715 */
716 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
717exit:
718 return status;
719}
720
721static int ql_get_8000_flash_params(struct ql_adapter *qdev)
722{
723 u32 i, size;
724 int status;
725 __le32 *p = (__le32 *)&qdev->flash;
726 u32 offset;
727 u8 mac_addr[6];
728
729 /* Get flash offset for function and adjust
730 * for dword access.
731 */
732 if (!qdev->port)
733 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
734 else
735 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
736
737 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
738 return -ETIMEDOUT;
739
740 size = sizeof(struct flash_params_8000) / sizeof(u32);
741 for (i = 0; i < size; i++, p++) {
742 status = ql_read_flash_word(qdev, i+offset, p);
743 if (status) {
744 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
745 goto exit;
746 }
747 }
748
749 status = ql_validate_flash(qdev,
750 sizeof(struct flash_params_8000) / sizeof(u16),
751 "8000");
752 if (status) {
753 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
754 status = -EINVAL;
755 goto exit;
756 }
757
758 /* Extract either manufacturer or BOFM modified
759 * MAC address.
760 */
761 if (qdev->flash.flash_params_8000.data_type1 == 2)
762 memcpy(mac_addr,
763 qdev->flash.flash_params_8000.mac_addr1,
764 qdev->ndev->addr_len);
765 else
766 memcpy(mac_addr,
767 qdev->flash.flash_params_8000.mac_addr,
768 qdev->ndev->addr_len);
769
770 if (!is_valid_ether_addr(mac_addr)) {
771 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
772 status = -EINVAL;
773 goto exit;
774 }
775
776 memcpy(qdev->ndev->dev_addr,
777 mac_addr,
778 qdev->ndev->addr_len);
779
780exit:
781 ql_sem_unlock(qdev, SEM_FLASH_MASK);
782 return status;
783}
784
785static int ql_get_8012_flash_params(struct ql_adapter *qdev)
786{
787 int i;
788 int status;
789 __le32 *p = (__le32 *)&qdev->flash;
790 u32 offset = 0;
791 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
792
793 /* Second function's parameters follow the first
794 * function's.
795 */
796 if (qdev->port)
797 offset = size;
798
799 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
800 return -ETIMEDOUT;
801
802 for (i = 0; i < size; i++, p++) {
803 status = ql_read_flash_word(qdev, i+offset, p);
804 if (status) {
805 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
806 goto exit;
807 }
808
809 }
810
811 status = ql_validate_flash(qdev,
812 sizeof(struct flash_params_8012) / sizeof(u16),
813 "8012");
814 if (status) {
815 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
816 status = -EINVAL;
817 goto exit;
818 }
819
820 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
821 status = -EINVAL;
822 goto exit;
823 }
824
825 memcpy(qdev->ndev->dev_addr,
826 qdev->flash.flash_params_8012.mac_addr,
827 qdev->ndev->addr_len);
828
829exit:
830 ql_sem_unlock(qdev, SEM_FLASH_MASK);
831 return status;
832}
833
834/* xgmac registers are located behind the xgmac_addr and xgmac_data
835 * register pair. Each read/write requires us to wait for the ready
836 * bit before reading/writing the data.
837 */
838static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
839{
840 int status;
841 /* wait for reg to come ready */
842 status = ql_wait_reg_rdy(qdev,
843 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
844 if (status)
845 return status;
846 /* write the data to the data reg */
847 ql_write32(qdev, XGMAC_DATA, data);
848 /* trigger the write */
849 ql_write32(qdev, XGMAC_ADDR, reg);
850 return status;
851}
852
853/* xgmac registers are located behind the xgmac_addr and xgmac_data
854 * register pair. Each read/write requires us to wait for the ready
855 * bit before reading/writing the data.
856 */
857int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
858{
859 int status = 0;
860 /* wait for reg to come ready */
861 status = ql_wait_reg_rdy(qdev,
862 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
863 if (status)
864 goto exit;
865 /* set up for reg read */
866 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
870 if (status)
871 goto exit;
872 /* get the data */
873 *data = ql_read32(qdev, XGMAC_DATA);
874exit:
875 return status;
876}
877
878/* This is used for reading the 64-bit statistics regs. */
879int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
880{
881 int status = 0;
882 u32 hi = 0;
883 u32 lo = 0;
884
885 status = ql_read_xgmac_reg(qdev, reg, &lo);
886 if (status)
887 goto exit;
888
889 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
890 if (status)
891 goto exit;
892
893 *data = (u64) lo | ((u64) hi << 32);
894
895exit:
896 return status;
897}
898
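/* The 64-bit XGMAC statistics registers are read as two 32-bit halves,
 * low word first, combined as *data = lo | ((u64)hi << 32).  Each half
 * goes through the same XGMAC_ADDR ready handshake as ql_read_xgmac_reg().
 */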
899static int ql_8000_port_initialize(struct ql_adapter *qdev)
900{
901 int status;
902 /*
903 * Get MPI firmware version for driver banner
904 * and ethtool info.
905 */
906 status = ql_mb_about_fw(qdev);
907 if (status)
908 goto exit;
909 status = ql_mb_get_fw_state(qdev);
910 if (status)
911 goto exit;
912 /* Wake up a worker to get/set the TX/RX frame sizes. */
913 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
914exit:
915 return status;
916}
917
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400918/* Take the MAC Core out of reset.
919 * Enable statistics counting.
920 * Take the transmitter/receiver out of reset.
921 * This functionality may be done in the MPI firmware at a
922 * later date.
923 */
924static int ql_8012_port_initialize(struct ql_adapter *qdev)
925{
926 int status = 0;
927 u32 data;
928
929 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
930 /* Another function has the semaphore, so
931 * wait for the port init bit to come ready.
932 */
933 QPRINTK(qdev, LINK, INFO,
934 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
935 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
936 if (status) {
937 QPRINTK(qdev, LINK, CRIT,
938 "Port initialize timed out.\n");
939 }
940 return status;
941 }
942
943 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
944 /* Set the core reset. */
945 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
946 if (status)
947 goto end;
948 data |= GLOBAL_CFG_RESET;
949 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
950 if (status)
951 goto end;
952
953 /* Clear the core reset and turn on jumbo for receiver. */
954 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
955 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
956 data |= GLOBAL_CFG_TX_STAT_EN;
957 data |= GLOBAL_CFG_RX_STAT_EN;
958 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
959 if (status)
960 goto end;
961
962 /* Enable transmitter, and clear its reset. */
963 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
964 if (status)
965 goto end;
966 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
967 data |= TX_CFG_EN; /* Enable the transmitter. */
968 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
969 if (status)
970 goto end;
971
972 /* Enable receiver and clear its reset. */
973 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
974 if (status)
975 goto end;
976 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
977 data |= RX_CFG_EN; /* Enable the receiver. */
978 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
979 if (status)
980 goto end;
981
982 /* Turn on jumbo. */
983 status =
984 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
985 if (status)
986 goto end;
987 status =
988 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
989 if (status)
990 goto end;
991
992 /* Signal to the world that the port is enabled. */
993 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
994end:
995 ql_sem_unlock(qdev, qdev->xg_sem_mask);
996 return status;
997}
998
999/* Get the next large buffer. */
1000static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1001{
1002 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1003 rx_ring->lbq_curr_idx++;
1004 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1005 rx_ring->lbq_curr_idx = 0;
1006 rx_ring->lbq_free_cnt++;
1007 return lbq_desc;
1008}
1009
1010/* Get the next small buffer. */
1011static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1012{
1013 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1014 rx_ring->sbq_curr_idx++;
1015 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1016 rx_ring->sbq_curr_idx = 0;
1017 rx_ring->sbq_free_cnt++;
1018 return sbq_desc;
1019}
1020
1021/* Update an rx ring index. */
1022static void ql_update_cq(struct rx_ring *rx_ring)
1023{
1024 rx_ring->cnsmr_idx++;
1025 rx_ring->curr_entry++;
1026 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1027 rx_ring->cnsmr_idx = 0;
1028 rx_ring->curr_entry = rx_ring->cq_base;
1029 }
1030}
1031
1032static void ql_write_cq_idx(struct rx_ring *rx_ring)
1033{
1034 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1035}
1036
1037/* Process (refill) a large buffer queue. */
1038static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1039{
1040 u32 clean_idx = rx_ring->lbq_clean_idx;
1041 u32 start_idx = clean_idx;
1042 struct bq_desc *lbq_desc;
1043 u64 map;
1044 int i;
1045
1046 while (rx_ring->lbq_free_cnt > 16) {
1047 for (i = 0; i < 16; i++) {
1048 QPRINTK(qdev, RX_STATUS, DEBUG,
1049 "lbq: try cleaning clean_idx = %d.\n",
1050 clean_idx);
1051 lbq_desc = &rx_ring->lbq[clean_idx];
1052 if (lbq_desc->p.lbq_page == NULL) {
1053 QPRINTK(qdev, RX_STATUS, DEBUG,
1054 "lbq: getting new page for index %d.\n",
1055 lbq_desc->index);
1056 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1057 if (lbq_desc->p.lbq_page == NULL) {
1058 rx_ring->lbq_clean_idx = clean_idx;
1059 QPRINTK(qdev, RX_STATUS, ERR,
1060 "Couldn't get a page.\n");
1061 return;
1062 }
1063 map = pci_map_page(qdev->pdev,
1064 lbq_desc->p.lbq_page,
1065 0, PAGE_SIZE,
1066 PCI_DMA_FROMDEVICE);
1067 if (pci_dma_mapping_error(qdev->pdev, map)) {
1068 rx_ring->lbq_clean_idx = clean_idx;
1069 put_page(lbq_desc->p.lbq_page);
1070 lbq_desc->p.lbq_page = NULL;
1071 QPRINTK(qdev, RX_STATUS, ERR,
1072 "PCI mapping failed.\n");
1073 return;
1074 }
1075 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1076 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
1077 *lbq_desc->addr = cpu_to_le64(map);
1078 }
1079 clean_idx++;
1080 if (clean_idx == rx_ring->lbq_len)
1081 clean_idx = 0;
1082 }
1083
1084 rx_ring->lbq_clean_idx = clean_idx;
1085 rx_ring->lbq_prod_idx += 16;
1086 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1087 rx_ring->lbq_prod_idx = 0;
1088 rx_ring->lbq_free_cnt -= 16;
1089 }
1090
1091 if (start_idx != clean_idx) {
1092 QPRINTK(qdev, RX_STATUS, DEBUG,
1093 "lbq: updating prod idx = %d.\n",
1094 rx_ring->lbq_prod_idx);
1095 ql_write_db_reg(rx_ring->lbq_prod_idx,
1096 rx_ring->lbq_prod_idx_db_reg);
1097 }
1098}
1099
1100/* Process (refill) a small buffer queue. */
1101static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1102{
1103 u32 clean_idx = rx_ring->sbq_clean_idx;
1104 u32 start_idx = clean_idx;
1105 struct bq_desc *sbq_desc;
1106 u64 map;
1107 int i;
1108
1109 while (rx_ring->sbq_free_cnt > 16) {
1110 for (i = 0; i < 16; i++) {
1111 sbq_desc = &rx_ring->sbq[clean_idx];
1112 QPRINTK(qdev, RX_STATUS, DEBUG,
1113 "sbq: try cleaning clean_idx = %d.\n",
1114 clean_idx);
1115 if (sbq_desc->p.skb == NULL) {
1116 QPRINTK(qdev, RX_STATUS, DEBUG,
1117 "sbq: getting new skb for index %d.\n",
1118 sbq_desc->index);
1119 sbq_desc->p.skb =
1120 netdev_alloc_skb(qdev->ndev,
1121 rx_ring->sbq_buf_size);
1122 if (sbq_desc->p.skb == NULL) {
1123 QPRINTK(qdev, PROBE, ERR,
1124 "Couldn't get an skb.\n");
1125 rx_ring->sbq_clean_idx = clean_idx;
1126 return;
1127 }
1128 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1129 map = pci_map_single(qdev->pdev,
1130 sbq_desc->p.skb->data,
1131 rx_ring->sbq_buf_size /
1132 2, PCI_DMA_FROMDEVICE);
1133 if (pci_dma_mapping_error(qdev->pdev, map)) {
1134 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1135 rx_ring->sbq_clean_idx = clean_idx;
1136 dev_kfree_skb_any(sbq_desc->p.skb);
1137 sbq_desc->p.skb = NULL;
1138 return;
1139 }
1140 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1141 pci_unmap_len_set(sbq_desc, maplen,
1142 rx_ring->sbq_buf_size / 2);
1143 *sbq_desc->addr = cpu_to_le64(map);
1144 }
1145
1146 clean_idx++;
1147 if (clean_idx == rx_ring->sbq_len)
1148 clean_idx = 0;
1149 }
1150 rx_ring->sbq_clean_idx = clean_idx;
1151 rx_ring->sbq_prod_idx += 16;
1152 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1153 rx_ring->sbq_prod_idx = 0;
1154 rx_ring->sbq_free_cnt -= 16;
1155 }
1156
1157 if (start_idx != clean_idx) {
1158 QPRINTK(qdev, RX_STATUS, DEBUG,
1159 "sbq: updating prod idx = %d.\n",
1160 rx_ring->sbq_prod_idx);
1161 ql_write_db_reg(rx_ring->sbq_prod_idx,
1162 rx_ring->sbq_prod_idx_db_reg);
1163 }
1164}
1165
1166static void ql_update_buffer_queues(struct ql_adapter *qdev,
1167 struct rx_ring *rx_ring)
1168{
1169 ql_update_sbq(qdev, rx_ring);
1170 ql_update_lbq(qdev, rx_ring);
1171}
1172
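/* Both refill routines above work in batches of 16: buffers are allocated
 * and DMA-mapped at the clean index, the producer index advances by 16,
 * and the producer doorbell register is written only if at least one batch
 * completed, which keeps MMIO writes off the per-buffer path.
 */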
1173/* Unmaps tx buffers. Can be called from send() if a pci mapping
1174 * fails at some stage, or from the interrupt when a tx completes.
1175 */
1176static void ql_unmap_send(struct ql_adapter *qdev,
1177 struct tx_ring_desc *tx_ring_desc, int mapped)
1178{
1179 int i;
1180 for (i = 0; i < mapped; i++) {
1181 if (i == 0 || (i == 7 && mapped > 7)) {
1182 /*
1183 * Unmap the skb->data area, or the
1184 * external sglist (AKA the Outbound
1185 * Address List (OAL)).
1186 * If it's the zeroth element, then it's
1187 * the skb->data area. If it's the 7th
1188 * element and there are more than 6 frags,
1189 * then it's an OAL.
1190 */
1191 if (i == 7) {
1192 QPRINTK(qdev, TX_DONE, DEBUG,
1193 "unmapping OAL area.\n");
1194 }
1195 pci_unmap_single(qdev->pdev,
1196 pci_unmap_addr(&tx_ring_desc->map[i],
1197 mapaddr),
1198 pci_unmap_len(&tx_ring_desc->map[i],
1199 maplen),
1200 PCI_DMA_TODEVICE);
1201 } else {
1202 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1203 i);
1204 pci_unmap_page(qdev->pdev,
1205 pci_unmap_addr(&tx_ring_desc->map[i],
1206 mapaddr),
1207 pci_unmap_len(&tx_ring_desc->map[i],
1208 maplen), PCI_DMA_TODEVICE);
1209 }
1210 }
1211
1212}
1213
1214/* Map the buffers for this transmit. This will return
1215 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1216 */
1217static int ql_map_send(struct ql_adapter *qdev,
1218 struct ob_mac_iocb_req *mac_iocb_ptr,
1219 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1220{
1221 int len = skb_headlen(skb);
1222 dma_addr_t map;
1223 int frag_idx, err, map_idx = 0;
1224 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1225 int frag_cnt = skb_shinfo(skb)->nr_frags;
1226
1227 if (frag_cnt) {
1228 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1229 }
1230 /*
1231 * Map the skb buffer first.
1232 */
1233 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1234
1235 err = pci_dma_mapping_error(qdev->pdev, map);
1236 if (err) {
1237 QPRINTK(qdev, TX_QUEUED, ERR,
1238 "PCI mapping failed with error: %d\n", err);
1239
1240 return NETDEV_TX_BUSY;
1241 }
1242
1243 tbd->len = cpu_to_le32(len);
1244 tbd->addr = cpu_to_le64(map);
1245 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1246 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1247 map_idx++;
1248
1249 /*
1250 * This loop fills the remainder of the 8 address descriptors
1251 * in the IOCB. If there are more than 7 fragments, then the
1252 * eighth address desc will point to an external list (OAL).
1253 * When this happens, the remainder of the frags will be stored
1254 * in this list.
1255 */
1256 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1257 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1258 tbd++;
1259 if (frag_idx == 6 && frag_cnt > 7) {
1260 /* Let's tack on an sglist.
1261 * Our control block will now
1262 * look like this:
1263 * iocb->seg[0] = skb->data
1264 * iocb->seg[1] = frag[0]
1265 * iocb->seg[2] = frag[1]
1266 * iocb->seg[3] = frag[2]
1267 * iocb->seg[4] = frag[3]
1268 * iocb->seg[5] = frag[4]
1269 * iocb->seg[6] = frag[5]
1270 * iocb->seg[7] = ptr to OAL (external sglist)
1271 * oal->seg[0] = frag[6]
1272 * oal->seg[1] = frag[7]
1273 * oal->seg[2] = frag[8]
1274 * oal->seg[3] = frag[9]
1275 * oal->seg[4] = frag[10]
1276 * etc...
1277 */
1278 /* Tack on the OAL in the eighth segment of IOCB. */
1279 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1280 sizeof(struct oal),
1281 PCI_DMA_TODEVICE);
1282 err = pci_dma_mapping_error(qdev->pdev, map);
1283 if (err) {
1284 QPRINTK(qdev, TX_QUEUED, ERR,
1285 "PCI mapping outbound address list with error: %d\n",
1286 err);
1287 goto map_error;
1288 }
1289
1290 tbd->addr = cpu_to_le64(map);
1291 /*
1292 * The length is the number of fragments
1293 * that remain to be mapped times the length
1294 * of our sglist (OAL).
1295 */
1296 tbd->len =
1297 cpu_to_le32((sizeof(struct tx_buf_desc) *
1298 (frag_cnt - frag_idx)) | TX_DESC_C);
1299 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1300 map);
1301 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1302 sizeof(struct oal));
1303 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1304 map_idx++;
1305 }
1306
1307 map =
1308 pci_map_page(qdev->pdev, frag->page,
1309 frag->page_offset, frag->size,
1310 PCI_DMA_TODEVICE);
1311
1312 err = pci_dma_mapping_error(qdev->pdev, map);
1313 if (err) {
1314 QPRINTK(qdev, TX_QUEUED, ERR,
1315 "PCI mapping frags failed with error: %d.\n",
1316 err);
1317 goto map_error;
1318 }
1319
1320 tbd->addr = cpu_to_le64(map);
1321 tbd->len = cpu_to_le32(frag->size);
1322 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1323 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1324 frag->size);
1325
1326 }
1327 /* Save the number of segments we've mapped. */
1328 tx_ring_desc->map_cnt = map_idx;
1329 /* Terminate the last segment. */
1330 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1331 return NETDEV_TX_OK;
1332
1333map_error:
1334 /*
1335 * If the first frag mapping failed, then i will be zero.
1336 * This causes the unmap of the skb->data area. Otherwise
1337 * we pass in the number of frags that mapped successfully
1338 * so they can be unmapped.
1339 */
1340 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1341 return NETDEV_TX_BUSY;
1342}
1343
1344static void ql_realign_skb(struct sk_buff *skb, int len)
1345{
1346 void *temp_addr = skb->data;
1347
1348 /* Undo the skb_reserve(skb,32) we did before
1349 * giving to hardware, and realign data on
1350 * a 2-byte boundary.
1351 */
1352 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1353 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1354 skb_copy_to_linear_data(skb, temp_addr,
1355 (unsigned int)len);
1356}
1357
1358/*
1359 * This function builds an skb for the given inbound
1360 * completion. It will be rewritten for readability in the near
1361 * future, but for not it works well.
1362 */
1363static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1364 struct rx_ring *rx_ring,
1365 struct ib_mac_iocb_rsp *ib_mac_rsp)
1366{
1367 struct bq_desc *lbq_desc;
1368 struct bq_desc *sbq_desc;
1369 struct sk_buff *skb = NULL;
1370 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1371 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1372
1373 /*
1374 * Handle the header buffer if present.
1375 */
1376 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1377 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1378 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1379 /*
1380 * Headers fit nicely into a small buffer.
1381 */
1382 sbq_desc = ql_get_curr_sbuf(rx_ring);
1383 pci_unmap_single(qdev->pdev,
1384 pci_unmap_addr(sbq_desc, mapaddr),
1385 pci_unmap_len(sbq_desc, maplen),
1386 PCI_DMA_FROMDEVICE);
1387 skb = sbq_desc->p.skb;
1388 ql_realign_skb(skb, hdr_len);
1389 skb_put(skb, hdr_len);
1390 sbq_desc->p.skb = NULL;
1391 }
1392
1393 /*
1394 * Handle the data buffer(s).
1395 */
1396 if (unlikely(!length)) { /* Is there data too? */
1397 QPRINTK(qdev, RX_STATUS, DEBUG,
1398 "No Data buffer in this packet.\n");
1399 return skb;
1400 }
1401
1402 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1403 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1404 QPRINTK(qdev, RX_STATUS, DEBUG,
1405 "Headers in small, data of %d bytes in small, combine them.\n", length);
1406 /*
1407 * Data is less than small buffer size so it's
1408 * stuffed in a small buffer.
1409 * For this case we append the data
1410 * from the "data" small buffer to the "header" small
1411 * buffer.
1412 */
1413 sbq_desc = ql_get_curr_sbuf(rx_ring);
1414 pci_dma_sync_single_for_cpu(qdev->pdev,
1415 pci_unmap_addr
1416 (sbq_desc, mapaddr),
1417 pci_unmap_len
1418 (sbq_desc, maplen),
1419 PCI_DMA_FROMDEVICE);
1420 memcpy(skb_put(skb, length),
1421 sbq_desc->p.skb->data, length);
1422 pci_dma_sync_single_for_device(qdev->pdev,
1423 pci_unmap_addr
1424 (sbq_desc,
1425 mapaddr),
1426 pci_unmap_len
1427 (sbq_desc,
1428 maplen),
1429 PCI_DMA_FROMDEVICE);
1430 } else {
1431 QPRINTK(qdev, RX_STATUS, DEBUG,
1432 "%d bytes in a single small buffer.\n", length);
1433 sbq_desc = ql_get_curr_sbuf(rx_ring);
1434 skb = sbq_desc->p.skb;
1435 ql_realign_skb(skb, length);
1436 skb_put(skb, length);
1437 pci_unmap_single(qdev->pdev,
1438 pci_unmap_addr(sbq_desc,
1439 mapaddr),
1440 pci_unmap_len(sbq_desc,
1441 maplen),
1442 PCI_DMA_FROMDEVICE);
1443 sbq_desc->p.skb = NULL;
1444 }
1445 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1446 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1447 QPRINTK(qdev, RX_STATUS, DEBUG,
1448 "Header in small, %d bytes in large. Chain large to small!\n", length);
1449 /*
1450 * The data is in a single large buffer. We
1451 * chain it to the header buffer's skb and let
1452 * it rip.
1453 */
1454 lbq_desc = ql_get_curr_lbuf(rx_ring);
1455 pci_unmap_page(qdev->pdev,
1456 pci_unmap_addr(lbq_desc,
1457 mapaddr),
1458 pci_unmap_len(lbq_desc, maplen),
1459 PCI_DMA_FROMDEVICE);
1460 QPRINTK(qdev, RX_STATUS, DEBUG,
1461 "Chaining page to skb.\n");
1462 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1463 0, length);
1464 skb->len += length;
1465 skb->data_len += length;
1466 skb->truesize += length;
1467 lbq_desc->p.lbq_page = NULL;
1468 } else {
1469 /*
1470 * The headers and data are in a single large buffer. We
1471 * copy it to a new skb and let it go. This can happen with
1472 * jumbo mtu on a non-TCP/UDP frame.
1473 */
1474 lbq_desc = ql_get_curr_lbuf(rx_ring);
1475 skb = netdev_alloc_skb(qdev->ndev, length);
1476 if (skb == NULL) {
1477 QPRINTK(qdev, PROBE, DEBUG,
1478 "No skb available, drop the packet.\n");
1479 return NULL;
1480 }
1481 pci_unmap_page(qdev->pdev,
1482 pci_unmap_addr(lbq_desc,
1483 mapaddr),
1484 pci_unmap_len(lbq_desc, maplen),
1485 PCI_DMA_FROMDEVICE);
1486 skb_reserve(skb, NET_IP_ALIGN);
1487 QPRINTK(qdev, RX_STATUS, DEBUG,
1488 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1489 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1490 0, length);
1491 skb->len += length;
1492 skb->data_len += length;
1493 skb->truesize += length;
1494 length -= length;
1495 lbq_desc->p.lbq_page = NULL;
1496 __pskb_pull_tail(skb,
1497 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1498 VLAN_ETH_HLEN : ETH_HLEN);
1499 }
1500 } else {
1501 /*
1502 * The data is in a chain of large buffers
1503 * pointed to by a small buffer. We loop
1504 * through and chain them to our small header
1505 * buffer's skb.
1506 * frags: There are 18 max frags and our small
1507 * buffer will hold 32 of them. The thing is,
1508 * we'll use 3 max for our 9000 byte jumbo
1509 * frames. If the MTU goes up we could
1510 * eventually be in trouble.
1511 */
1512 int size, offset, i = 0;
1513 __le64 *bq, bq_array[8];
1514 sbq_desc = ql_get_curr_sbuf(rx_ring);
1515 pci_unmap_single(qdev->pdev,
1516 pci_unmap_addr(sbq_desc, mapaddr),
1517 pci_unmap_len(sbq_desc, maplen),
1518 PCI_DMA_FROMDEVICE);
1519 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1520 /*
1521 * This is a non-TCP/UDP IP frame, so
1522 * the headers aren't split into a small
1523 * buffer. We have to use the small buffer
1524 * that contains our sg list as our skb to
1525 * send upstairs. Copy the sg list here to
1526 * a local buffer and use it to find the
1527 * pages to chain.
1528 */
1529 QPRINTK(qdev, RX_STATUS, DEBUG,
1530 "%d bytes of headers & data in chain of large.\n", length);
1531 skb = sbq_desc->p.skb;
1532 bq = &bq_array[0];
1533 memcpy(bq, skb->data, sizeof(bq_array));
1534 sbq_desc->p.skb = NULL;
1535 skb_reserve(skb, NET_IP_ALIGN);
1536 } else {
1537 QPRINTK(qdev, RX_STATUS, DEBUG,
1538 "Headers in small, %d bytes of data in chain of large.\n", length);
1539 bq = (__le64 *)sbq_desc->p.skb->data;
1540 }
1541 while (length > 0) {
1542 lbq_desc = ql_get_curr_lbuf(rx_ring);
1543 pci_unmap_page(qdev->pdev,
1544 pci_unmap_addr(lbq_desc,
1545 mapaddr),
1546 pci_unmap_len(lbq_desc,
1547 maplen),
1548 PCI_DMA_FROMDEVICE);
1549 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1550 offset = 0;
1551
1552 QPRINTK(qdev, RX_STATUS, DEBUG,
1553 "Adding page %d to skb for %d bytes.\n",
1554 i, size);
1555 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1556 offset, size);
1557 skb->len += size;
1558 skb->data_len += size;
1559 skb->truesize += size;
1560 length -= size;
1561 lbq_desc->p.lbq_page = NULL;
1562 bq++;
1563 i++;
1564 }
1565 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1566 VLAN_ETH_HLEN : ETH_HLEN);
1567 }
1568 return skb;
1569}
1570
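/* ql_build_rx_skb() handles several inbound layouts: data in a small
 * buffer (appended to the header skb or used directly), data in a single
 * large buffer (page chained to the header skb, or a fresh skb when there
 * is no header buffer), and data spread across a chain of large buffers
 * described by an sglist held in a small buffer.  In the chained cases the
 * MAC (or VLAN) header is pulled into the linear area with
 * __pskb_pull_tail().
 */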
1571/* Process an inbound completion from an rx ring. */
1572static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1573 struct rx_ring *rx_ring,
1574 struct ib_mac_iocb_rsp *ib_mac_rsp)
1575{
1576 struct net_device *ndev = qdev->ndev;
1577 struct sk_buff *skb = NULL;
1578 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1579 IB_MAC_IOCB_RSP_VLAN_MASK);
1580
1581 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1582
1583 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1584 if (unlikely(!skb)) {
1585 QPRINTK(qdev, RX_STATUS, DEBUG,
1586 "No skb available, drop packet.\n");
1587 return;
1588 }
1589
1590 /* Frame error, so drop the packet. */
1591 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1592 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1593 ib_mac_rsp->flags2);
1594 dev_kfree_skb_any(skb);
1595 return;
1596 }
1597
1598 /* The max framesize filter on this chip is set higher than
1599 * MTU since FCoE uses 2k frames.
1600 */
1601 if (skb->len > ndev->mtu + ETH_HLEN) {
1602 dev_kfree_skb_any(skb);
1603 return;
1604 }
1605
1606 prefetch(skb->data);
1607 skb->dev = ndev;
1608 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1609 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1610 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1611 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1612 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1613 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1614 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1615 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1616 }
1617 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1618 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1619 }
1620
1621 skb->protocol = eth_type_trans(skb, ndev);
1622 skb->ip_summed = CHECKSUM_NONE;
1623
1624 /* If rx checksum is on, and there are no
1625 * csum or frame errors.
1626 */
1627 if (qdev->rx_csum &&
1628 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1629 /* TCP frame. */
1630 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1631 QPRINTK(qdev, RX_STATUS, DEBUG,
1632 "TCP checksum done!\n");
1633 skb->ip_summed = CHECKSUM_UNNECESSARY;
1634 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1635 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1636 /* Unfragmented ipv4 UDP frame. */
1637 struct iphdr *iph = (struct iphdr *) skb->data;
1638 if (!(iph->frag_off &
1639 cpu_to_be16(IP_MF|IP_OFFSET))) {
1640 skb->ip_summed = CHECKSUM_UNNECESSARY;
1641 QPRINTK(qdev, RX_STATUS, DEBUG,
1642 "TCP checksum done!\n");
1643 }
1644 }
1645 }
1646
1647 qdev->stats.rx_packets++;
1648 qdev->stats.rx_bytes += skb->len;
1649 skb_record_rx_queue(skb, rx_ring->cq_id);
1650 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1651 if (qdev->vlgrp &&
1652 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1653 (vlan_id != 0))
1654 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1655 vlan_id, skb);
1656 else
1657 napi_gro_receive(&rx_ring->napi, skb);
1658 } else {
1659 if (qdev->vlgrp &&
1660 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1661 (vlan_id != 0))
1662 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1663 else
1664 netif_receive_skb(skb);
1665 }
1666}
1667
1668/* Process an outbound completion from an rx ring. */
1669static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1670 struct ob_mac_iocb_rsp *mac_rsp)
1671{
1672 struct tx_ring *tx_ring;
1673 struct tx_ring_desc *tx_ring_desc;
1674
1675 QL_DUMP_OB_MAC_RSP(mac_rsp);
1676 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1677 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1678 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer13cfd5b2009-07-02 06:06:10 +00001679 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001680 qdev->stats.tx_packets++;
1681 dev_kfree_skb(tx_ring_desc->skb);
1682 tx_ring_desc->skb = NULL;
1683
1684 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1685 OB_MAC_IOCB_RSP_S |
1686 OB_MAC_IOCB_RSP_L |
1687 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1688 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1689 QPRINTK(qdev, TX_DONE, WARNING,
1690 "Total descriptor length did not match transfer length.\n");
1691 }
1692 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1693 QPRINTK(qdev, TX_DONE, WARNING,
1694 "Frame too short to be legal, not sent.\n");
1695 }
1696 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1697 QPRINTK(qdev, TX_DONE, WARNING,
1698 "Frame too long, but sent anyway.\n");
1699 }
1700 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1701 QPRINTK(qdev, TX_DONE, WARNING,
1702 "PCI backplane error. Frame not sent.\n");
1703 }
1704 }
1705 atomic_inc(&tx_ring->tx_count);
1706}
1707
1708/* Fire up a handler to reset the MPI processor. */
1709void ql_queue_fw_error(struct ql_adapter *qdev)
1710{
Ron Mercer6a473302009-07-02 06:06:12 +00001711 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001712 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1713}
1714
1715void ql_queue_asic_error(struct ql_adapter *qdev)
1716{
Ron Mercer6a473302009-07-02 06:06:12 +00001717 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001718 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08001719 /* Clear adapter up bit to signal the recovery
1720 * process that it shouldn't kill the reset worker
1721 * thread
1722 */
1723 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001724 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1725}
1726
1727static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1728 struct ib_ae_iocb_rsp *ib_ae_rsp)
1729{
1730 switch (ib_ae_rsp->event) {
1731 case MGMT_ERR_EVENT:
1732 QPRINTK(qdev, RX_ERR, ERR,
1733 "Management Processor Fatal Error.\n");
1734 ql_queue_fw_error(qdev);
1735 return;
1736
1737 case CAM_LOOKUP_ERR_EVENT:
1738 QPRINTK(qdev, LINK, ERR,
1739 			"Multiple CAM hits occurred during lookup.\n");
1740 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1741 ql_queue_asic_error(qdev);
1742 return;
1743
1744 case SOFT_ECC_ERROR_EVENT:
1745 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1746 ql_queue_asic_error(qdev);
1747 break;
1748
1749 case PCI_ERR_ANON_BUF_RD:
1750 QPRINTK(qdev, RX_ERR, ERR,
1751 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1752 ib_ae_rsp->q_id);
1753 ql_queue_asic_error(qdev);
1754 break;
1755
1756 default:
1757 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1758 ib_ae_rsp->event);
1759 ql_queue_asic_error(qdev);
1760 break;
1761 }
1762}
1763
1764static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1765{
1766 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001767 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001768 struct ob_mac_iocb_rsp *net_rsp = NULL;
1769 int count = 0;
1770
Ron Mercer1e213302009-03-09 10:59:21 +00001771 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001772 /* While there are entries in the completion queue. */
1773 while (prod != rx_ring->cnsmr_idx) {
1774
1775 QPRINTK(qdev, RX_STATUS, DEBUG,
1776 			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1777 prod, rx_ring->cnsmr_idx);
1778
1779 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1780 rmb();
1781 switch (net_rsp->opcode) {
1782
1783 case OPCODE_OB_MAC_TSO_IOCB:
1784 case OPCODE_OB_MAC_IOCB:
1785 ql_process_mac_tx_intr(qdev, net_rsp);
1786 break;
1787 default:
1788 QPRINTK(qdev, RX_STATUS, DEBUG,
1789 				"Hit default case, not handled! Dropping the packet, opcode = %x.\n",
1790 net_rsp->opcode);
1791 }
1792 count++;
1793 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001794 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001795 }
1796 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00001797	tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
1798	if (net_rsp != NULL &&
1799	    __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001800 if (atomic_read(&tx_ring->queue_stopped) &&
1801 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1802 /*
1803 * The queue got stopped because the tx_ring was full.
1804 * Wake it up, because it's now at least 25% empty.
1805 */
Ron Mercer1e213302009-03-09 10:59:21 +00001806 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001807 }
1808
1809 return count;
1810}
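/* Illustrative sketch (not part of the driver): the wake test above only
 * restarts a stopped subqueue once more than a quarter of the work queue
 * entries are free again (tx_count > wq_len / 4).  For a 256-entry ring
 * that means at least 65 free slots before the stack may queue packets
 * again, which avoids immediately re-stopping the queue.  The helper name
 * is hypothetical.
 */
static inline int qlge_example_should_wake_queue(u32 free_entries, u32 wq_len)
{
	return free_entries > (wq_len / 4);	/* ring is at least ~25% empty */
}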
1811
1812static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1813{
1814 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001815 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 struct ql_net_rsp_iocb *net_rsp;
1817 int count = 0;
1818
1819 /* While there are entries in the completion queue. */
1820 while (prod != rx_ring->cnsmr_idx) {
1821
1822 QPRINTK(qdev, RX_STATUS, DEBUG,
1823 			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1824 prod, rx_ring->cnsmr_idx);
1825
1826 net_rsp = rx_ring->curr_entry;
1827 rmb();
1828 switch (net_rsp->opcode) {
1829 case OPCODE_IB_MAC_IOCB:
1830 ql_process_mac_rx_intr(qdev, rx_ring,
1831 (struct ib_mac_iocb_rsp *)
1832 net_rsp);
1833 break;
1834
1835 case OPCODE_IB_AE_IOCB:
1836 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1837 net_rsp);
1838 break;
1839 default:
1840 {
1841 QPRINTK(qdev, RX_STATUS, DEBUG,
1842 				"Hit default case, not handled! Dropping the packet, opcode = %x.\n",
1843 net_rsp->opcode);
1844 }
1845 }
1846 count++;
1847 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00001848 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001849 if (count == budget)
1850 break;
1851 }
1852 ql_update_buffer_queues(qdev, rx_ring);
1853 ql_write_cq_idx(rx_ring);
1854 return count;
1855}
1856
1857static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1858{
1859 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1860 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00001861 struct rx_ring *trx_ring;
1862 int i, work_done = 0;
1863 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001864
1865 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1866 rx_ring->cq_id);
1867
Ron Mercer39aa8162009-08-27 11:02:11 +00001868 /* Service the TX rings first. They start
1869 * right after the RSS rings. */
1870 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1871 trx_ring = &qdev->rx_ring[i];
1872 /* If this TX completion ring belongs to this vector and
1873 * it's not empty then service it.
1874 */
1875 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1876 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1877 trx_ring->cnsmr_idx)) {
1878 QPRINTK(qdev, INTR, DEBUG,
1879 "%s: Servicing TX completion ring %d.\n",
1880 __func__, trx_ring->cq_id);
1881 ql_clean_outbound_rx_ring(trx_ring);
1882 }
1883 }
1884
1885 /*
1886 * Now service the RSS ring if it's active.
1887 */
1888 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1889 rx_ring->cnsmr_idx) {
1890 QPRINTK(qdev, INTR, DEBUG,
1891 "%s: Servicing RX completion ring %d.\n",
1892 __func__, rx_ring->cq_id);
1893 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1894 }
1895
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001897 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001898 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1899 }
1900 return work_done;
1901}
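/* Illustrative sketch (not part of the driver): the poll routine above
 * decides whether a completion ring needs service by combining two tests:
 * the ring's cq_id must be set in this vector's irq_mask, and the shadow
 * producer index must differ from the driver's consumer index.  The helper
 * below restates that pair of tests; its name is hypothetical.
 */
static inline int qlge_example_ring_needs_service(struct intr_context *ctx,
						  struct rx_ring *ring)
{
	return (ctx->irq_mask & (1 << ring->cq_id)) &&
	       (ql_read_sh_reg(ring->prod_idx_sh_reg) != ring->cnsmr_idx);
}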
1902
1903static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1904{
1905 struct ql_adapter *qdev = netdev_priv(ndev);
1906
1907 qdev->vlgrp = grp;
1908 if (grp) {
1909 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1910 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1911 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1912 } else {
1913 QPRINTK(qdev, IFUP, DEBUG,
1914 "Turning off VLAN in NIC_RCV_CFG.\n");
1915 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1916 }
1917}
1918
1919static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1920{
1921 struct ql_adapter *qdev = netdev_priv(ndev);
1922 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00001923 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001924
Ron Mercercc288f52009-02-23 10:42:14 +00001925 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1926 if (status)
1927 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001928 if (ql_set_mac_addr_reg
1929 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1930 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1931 }
Ron Mercercc288f52009-02-23 10:42:14 +00001932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001933}
1934
1935static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1936{
1937 struct ql_adapter *qdev = netdev_priv(ndev);
1938 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00001939 int status;
1940
1941 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1942 if (status)
1943 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001944
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001945 if (ql_set_mac_addr_reg
1946 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1947 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1948 }
Ron Mercercc288f52009-02-23 10:42:14 +00001949 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001950
1951}
1952
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001953/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1954static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1955{
1956 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08001957 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001958 return IRQ_HANDLED;
1959}
1960
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001961/* This handles a fatal error, MPI activity, and the default
1962 * rx_ring in an MSI-X multiple vector environment.
1963 * In MSI/Legacy environment it also process the rest of
1964 * the rx_rings.
1965 */
1966static irqreturn_t qlge_isr(int irq, void *dev_id)
1967{
1968 struct rx_ring *rx_ring = dev_id;
1969 struct ql_adapter *qdev = rx_ring->qdev;
1970 struct intr_context *intr_context = &qdev->intr_context[0];
1971 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001972 int work_done = 0;
1973
Ron Mercerbb0d2152008-10-20 10:30:26 -07001974 spin_lock(&qdev->hw_lock);
1975 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1976 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1977 spin_unlock(&qdev->hw_lock);
1978 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001979 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07001980 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001981
Ron Mercerbb0d2152008-10-20 10:30:26 -07001982 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001983
1984 /*
1985 * Check for fatal error.
1986 */
1987 if (var & STS_FE) {
1988 ql_queue_asic_error(qdev);
1989 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1990 var = ql_read32(qdev, ERR_STS);
1991 QPRINTK(qdev, INTR, ERR,
1992 "Resetting chip. Error Status Register = 0x%x\n", var);
1993 return IRQ_HANDLED;
1994 }
1995
1996 /*
1997 * Check MPI processor activity.
1998 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00001999 if ((var & STS_PI) &&
2000 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002001 /*
2002 * We've got an async event or mailbox completion.
2003 * Handle it and clear the source of the interrupt.
2004 */
2005 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2006 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002007 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2008 queue_delayed_work_on(smp_processor_id(),
2009 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002010 work_done++;
2011 }
2012
2013 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002014 * Get the bit-mask that shows the active queues for this
2015 * pass. Compare it to the queues that this irq services
2016 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002017 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002018 var = ql_read32(qdev, ISR1);
2019 if (var & intr_context->irq_mask) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002020 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002021 "Waking handler for rx_ring[0].\n");
2022 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ben Hutchings288379f2009-01-19 16:43:59 -08002023 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002024 work_done++;
2025 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002026 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002027 return work_done ? IRQ_HANDLED : IRQ_NONE;
2028}
2029
2030static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2031{
2032
2033 if (skb_is_gso(skb)) {
2034 int err;
2035 if (skb_header_cloned(skb)) {
2036 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2037 if (err)
2038 return err;
2039 }
2040
2041 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2042 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2043 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2044 mac_iocb_ptr->total_hdrs_len =
2045 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2046 mac_iocb_ptr->net_trans_offset =
2047 cpu_to_le16(skb_network_offset(skb) |
2048 skb_transport_offset(skb)
2049 << OB_MAC_TRANSPORT_HDR_SHIFT);
2050 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2051 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2052 if (likely(skb->protocol == htons(ETH_P_IP))) {
2053 struct iphdr *iph = ip_hdr(skb);
2054 iph->check = 0;
2055 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2056 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2057 iph->daddr, 0,
2058 IPPROTO_TCP,
2059 0);
2060 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2061 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2062 tcp_hdr(skb)->check =
2063 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2064 &ipv6_hdr(skb)->daddr,
2065 0, IPPROTO_TCP, 0);
2066 }
2067 return 1;
2068 }
2069 return 0;
2070}
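/* Illustrative sketch (not part of the driver): ql_tso() above and
 * ql_hw_csum_setup() below both pack two byte offsets into the single
 * 16-bit net_trans_offset IOCB field: the network (IP) header offset in
 * the low bits and the transport (TCP/UDP) header offset shifted left by
 * OB_MAC_TRANSPORT_HDR_SHIFT.  For a plain Ethernet frame those offsets
 * are 14 and 34.  The helper name is hypothetical.
 */
static inline __le16 qlge_example_pack_hdr_offsets(const struct sk_buff *skb)
{
	return cpu_to_le16(skb_network_offset(skb) |
			   (skb_transport_offset(skb) <<
			    OB_MAC_TRANSPORT_HDR_SHIFT));
}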
2071
2072static void ql_hw_csum_setup(struct sk_buff *skb,
2073 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2074{
2075 int len;
2076 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002077 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002078 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2079 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2080 mac_iocb_ptr->net_trans_offset =
2081 cpu_to_le16(skb_network_offset(skb) |
2082 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2083
2084 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2085 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2086 if (likely(iph->protocol == IPPROTO_TCP)) {
2087 check = &(tcp_hdr(skb)->check);
2088 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2089 mac_iocb_ptr->total_hdrs_len =
2090 cpu_to_le16(skb_transport_offset(skb) +
2091 (tcp_hdr(skb)->doff << 2));
2092 } else {
2093 check = &(udp_hdr(skb)->check);
2094 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2095 mac_iocb_ptr->total_hdrs_len =
2096 cpu_to_le16(skb_transport_offset(skb) +
2097 sizeof(struct udphdr));
2098 }
2099 *check = ~csum_tcpudp_magic(iph->saddr,
2100 iph->daddr, len, iph->protocol, 0);
2101}
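/* Illustrative sketch (not part of the driver): ql_hw_csum_setup() above
 * seeds the L4 checksum field with the complemented pseudo-header sum so
 * the chip only has to add in the payload.  The length it feeds to
 * csum_tcpudp_magic() is the L4 segment length: the IP total length minus
 * the IP header, where ihl counts 32-bit words.  For example, a 1500-byte
 * datagram with a 20-byte header gives 1500 - (5 << 2) = 1480 bytes of
 * TCP header plus payload.  The helper name is hypothetical.
 */
static inline int qlge_example_l4_len(const struct iphdr *iph)
{
	return ntohs(iph->tot_len) - (iph->ihl << 2);
}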
2102
Stephen Hemminger613573252009-08-31 19:50:58 +00002103static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002104{
2105 struct tx_ring_desc *tx_ring_desc;
2106 struct ob_mac_iocb_req *mac_iocb_ptr;
2107 struct ql_adapter *qdev = netdev_priv(ndev);
2108 int tso;
2109 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002110 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002111
2112 tx_ring = &qdev->tx_ring[tx_ring_idx];
2113
Ron Mercer74c50b42009-03-09 10:59:27 +00002114 if (skb_padto(skb, ETH_ZLEN))
2115 return NETDEV_TX_OK;
2116
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002117 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2118 QPRINTK(qdev, TX_QUEUED, INFO,
2119 		"%s: shutting down tx queue %d due to lack of resources.\n",
2120 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002121 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 atomic_inc(&tx_ring->queue_stopped);
2123 return NETDEV_TX_BUSY;
2124 }
2125 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2126 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002127 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002128
2129 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2130 mac_iocb_ptr->tid = tx_ring_desc->index;
2131 /* We use the upper 32-bits to store the tx queue for this IO.
2132 * When we get the completion we can use it to establish the context.
2133 */
2134 mac_iocb_ptr->txq_idx = tx_ring_idx;
2135 tx_ring_desc->skb = skb;
2136
2137 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2138
2139 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2140 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2141 vlan_tx_tag_get(skb));
2142 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2143 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2144 }
2145 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2146 if (tso < 0) {
2147 dev_kfree_skb_any(skb);
2148 return NETDEV_TX_OK;
2149 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2150 ql_hw_csum_setup(skb,
2151 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2152 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002153 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2154 NETDEV_TX_OK) {
2155 QPRINTK(qdev, TX_QUEUED, ERR,
2156 "Could not map the segments.\n");
2157 return NETDEV_TX_BUSY;
2158 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002159 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2160 tx_ring->prod_idx++;
2161 if (tx_ring->prod_idx == tx_ring->wq_len)
2162 tx_ring->prod_idx = 0;
2163 wmb();
2164
2165 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2167 tx_ring->prod_idx, skb->len);
2168
2169 atomic_dec(&tx_ring->tx_count);
2170 return NETDEV_TX_OK;
2171}
2172
2173static void ql_free_shadow_space(struct ql_adapter *qdev)
2174{
2175 if (qdev->rx_ring_shadow_reg_area) {
2176 pci_free_consistent(qdev->pdev,
2177 PAGE_SIZE,
2178 qdev->rx_ring_shadow_reg_area,
2179 qdev->rx_ring_shadow_reg_dma);
2180 qdev->rx_ring_shadow_reg_area = NULL;
2181 }
2182 if (qdev->tx_ring_shadow_reg_area) {
2183 pci_free_consistent(qdev->pdev,
2184 PAGE_SIZE,
2185 qdev->tx_ring_shadow_reg_area,
2186 qdev->tx_ring_shadow_reg_dma);
2187 qdev->tx_ring_shadow_reg_area = NULL;
2188 }
2189}
2190
2191static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2192{
2193 qdev->rx_ring_shadow_reg_area =
2194 pci_alloc_consistent(qdev->pdev,
2195 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2196 if (qdev->rx_ring_shadow_reg_area == NULL) {
2197 QPRINTK(qdev, IFUP, ERR,
2198 "Allocation of RX shadow space failed.\n");
2199 return -ENOMEM;
2200 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002201 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002202 qdev->tx_ring_shadow_reg_area =
2203 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2204 &qdev->tx_ring_shadow_reg_dma);
2205 if (qdev->tx_ring_shadow_reg_area == NULL) {
2206 QPRINTK(qdev, IFUP, ERR,
2207 "Allocation of TX shadow space failed.\n");
2208 goto err_wqp_sh_area;
2209 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002210 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002211 return 0;
2212
2213err_wqp_sh_area:
2214 pci_free_consistent(qdev->pdev,
2215 PAGE_SIZE,
2216 qdev->rx_ring_shadow_reg_area,
2217 qdev->rx_ring_shadow_reg_dma);
2218 return -ENOMEM;
2219}
2220
2221static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2222{
2223 struct tx_ring_desc *tx_ring_desc;
2224 int i;
2225 struct ob_mac_iocb_req *mac_iocb_ptr;
2226
2227 mac_iocb_ptr = tx_ring->wq_base;
2228 tx_ring_desc = tx_ring->q;
2229 for (i = 0; i < tx_ring->wq_len; i++) {
2230 tx_ring_desc->index = i;
2231 tx_ring_desc->skb = NULL;
2232 tx_ring_desc->queue_entry = mac_iocb_ptr;
2233 mac_iocb_ptr++;
2234 tx_ring_desc++;
2235 }
2236 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2237 atomic_set(&tx_ring->queue_stopped, 0);
2238}
2239
2240static void ql_free_tx_resources(struct ql_adapter *qdev,
2241 struct tx_ring *tx_ring)
2242{
2243 if (tx_ring->wq_base) {
2244 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2245 tx_ring->wq_base, tx_ring->wq_base_dma);
2246 tx_ring->wq_base = NULL;
2247 }
2248 kfree(tx_ring->q);
2249 tx_ring->q = NULL;
2250}
2251
2252static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2253 struct tx_ring *tx_ring)
2254{
2255 tx_ring->wq_base =
2256 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2257 &tx_ring->wq_base_dma);
2258
2259 if ((tx_ring->wq_base == NULL)
Ron Mercer88c55e32009-06-10 15:49:33 +00002260 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002261 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2262 return -ENOMEM;
2263 }
2264 tx_ring->q =
2265 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2266 if (tx_ring->q == NULL)
2267 goto err;
2268
2269 return 0;
2270err:
2271 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2272 tx_ring->wq_base, tx_ring->wq_base_dma);
2273 return -ENOMEM;
2274}
2275
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002276static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002277{
2278 int i;
2279 struct bq_desc *lbq_desc;
2280
2281 for (i = 0; i < rx_ring->lbq_len; i++) {
2282 lbq_desc = &rx_ring->lbq[i];
2283 if (lbq_desc->p.lbq_page) {
2284 pci_unmap_page(qdev->pdev,
2285 pci_unmap_addr(lbq_desc, mapaddr),
2286 pci_unmap_len(lbq_desc, maplen),
2287 PCI_DMA_FROMDEVICE);
2288
2289 put_page(lbq_desc->p.lbq_page);
2290 lbq_desc->p.lbq_page = NULL;
2291 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002292 }
2293}
2294
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002295static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002296{
2297 int i;
2298 struct bq_desc *sbq_desc;
2299
2300 for (i = 0; i < rx_ring->sbq_len; i++) {
2301 sbq_desc = &rx_ring->sbq[i];
2302 if (sbq_desc == NULL) {
2303 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2304 return;
2305 }
2306 if (sbq_desc->p.skb) {
2307 pci_unmap_single(qdev->pdev,
2308 pci_unmap_addr(sbq_desc, mapaddr),
2309 pci_unmap_len(sbq_desc, maplen),
2310 PCI_DMA_FROMDEVICE);
2311 dev_kfree_skb(sbq_desc->p.skb);
2312 sbq_desc->p.skb = NULL;
2313 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002314 }
2315}
2316
Ron Mercer4545a3f2009-02-23 10:42:17 +00002317/* Free all large and small rx buffers associated
2318 * with the completion queues for this device.
2319 */
2320static void ql_free_rx_buffers(struct ql_adapter *qdev)
2321{
2322 int i;
2323 struct rx_ring *rx_ring;
2324
2325 for (i = 0; i < qdev->rx_ring_count; i++) {
2326 rx_ring = &qdev->rx_ring[i];
2327 if (rx_ring->lbq)
2328 ql_free_lbq_buffers(qdev, rx_ring);
2329 if (rx_ring->sbq)
2330 ql_free_sbq_buffers(qdev, rx_ring);
2331 }
2332}
2333
2334static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2335{
2336 struct rx_ring *rx_ring;
2337 int i;
2338
2339 for (i = 0; i < qdev->rx_ring_count; i++) {
2340 rx_ring = &qdev->rx_ring[i];
2341 if (rx_ring->type != TX_Q)
2342 ql_update_buffer_queues(qdev, rx_ring);
2343 }
2344}
2345
2346static void ql_init_lbq_ring(struct ql_adapter *qdev,
2347 struct rx_ring *rx_ring)
2348{
2349 int i;
2350 struct bq_desc *lbq_desc;
2351 __le64 *bq = rx_ring->lbq_base;
2352
2353 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2354 for (i = 0; i < rx_ring->lbq_len; i++) {
2355 lbq_desc = &rx_ring->lbq[i];
2356 memset(lbq_desc, 0, sizeof(*lbq_desc));
2357 lbq_desc->index = i;
2358 lbq_desc->addr = bq;
2359 bq++;
2360 }
2361}
2362
2363static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002364 struct rx_ring *rx_ring)
2365{
2366 int i;
2367 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002368 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002369
Ron Mercer4545a3f2009-02-23 10:42:17 +00002370 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002371 for (i = 0; i < rx_ring->sbq_len; i++) {
2372 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002373 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002374 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002375 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002376 bq++;
2377 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002378}
2379
2380static void ql_free_rx_resources(struct ql_adapter *qdev,
2381 struct rx_ring *rx_ring)
2382{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002383 /* Free the small buffer queue. */
2384 if (rx_ring->sbq_base) {
2385 pci_free_consistent(qdev->pdev,
2386 rx_ring->sbq_size,
2387 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2388 rx_ring->sbq_base = NULL;
2389 }
2390
2391 /* Free the small buffer queue control blocks. */
2392 kfree(rx_ring->sbq);
2393 rx_ring->sbq = NULL;
2394
2395 /* Free the large buffer queue. */
2396 if (rx_ring->lbq_base) {
2397 pci_free_consistent(qdev->pdev,
2398 rx_ring->lbq_size,
2399 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2400 rx_ring->lbq_base = NULL;
2401 }
2402
2403 /* Free the large buffer queue control blocks. */
2404 kfree(rx_ring->lbq);
2405 rx_ring->lbq = NULL;
2406
2407 /* Free the rx queue. */
2408 if (rx_ring->cq_base) {
2409 pci_free_consistent(qdev->pdev,
2410 rx_ring->cq_size,
2411 rx_ring->cq_base, rx_ring->cq_base_dma);
2412 rx_ring->cq_base = NULL;
2413 }
2414}
2415
2416/* Allocate queues and buffers for this completion queue based
2417 * on the values in the parameter structure. */
2418static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2419 struct rx_ring *rx_ring)
2420{
2421
2422 /*
2423 * Allocate the completion queue for this rx_ring.
2424 */
2425 rx_ring->cq_base =
2426 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2427 &rx_ring->cq_base_dma);
2428
2429 if (rx_ring->cq_base == NULL) {
2430 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2431 return -ENOMEM;
2432 }
2433
2434 if (rx_ring->sbq_len) {
2435 /*
2436 * Allocate small buffer queue.
2437 */
2438 rx_ring->sbq_base =
2439 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2440 &rx_ring->sbq_base_dma);
2441
2442 if (rx_ring->sbq_base == NULL) {
2443 QPRINTK(qdev, IFUP, ERR,
2444 "Small buffer queue allocation failed.\n");
2445 goto err_mem;
2446 }
2447
2448 /*
2449 * Allocate small buffer queue control blocks.
2450 */
2451 rx_ring->sbq =
2452 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2453 GFP_KERNEL);
2454 if (rx_ring->sbq == NULL) {
2455 QPRINTK(qdev, IFUP, ERR,
2456 "Small buffer queue control block allocation failed.\n");
2457 goto err_mem;
2458 }
2459
Ron Mercer4545a3f2009-02-23 10:42:17 +00002460 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002461 }
2462
2463 if (rx_ring->lbq_len) {
2464 /*
2465 * Allocate large buffer queue.
2466 */
2467 rx_ring->lbq_base =
2468 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2469 &rx_ring->lbq_base_dma);
2470
2471 if (rx_ring->lbq_base == NULL) {
2472 QPRINTK(qdev, IFUP, ERR,
2473 "Large buffer queue allocation failed.\n");
2474 goto err_mem;
2475 }
2476 /*
2477 * Allocate large buffer queue control blocks.
2478 */
2479 rx_ring->lbq =
2480 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2481 GFP_KERNEL);
2482 if (rx_ring->lbq == NULL) {
2483 QPRINTK(qdev, IFUP, ERR,
2484 "Large buffer queue control block allocation failed.\n");
2485 goto err_mem;
2486 }
2487
Ron Mercer4545a3f2009-02-23 10:42:17 +00002488 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002489 }
2490
2491 return 0;
2492
2493err_mem:
2494 ql_free_rx_resources(qdev, rx_ring);
2495 return -ENOMEM;
2496}
2497
2498static void ql_tx_ring_clean(struct ql_adapter *qdev)
2499{
2500 struct tx_ring *tx_ring;
2501 struct tx_ring_desc *tx_ring_desc;
2502 int i, j;
2503
2504 /*
2505 * Loop through all queues and free
2506 * any resources.
2507 */
2508 for (j = 0; j < qdev->tx_ring_count; j++) {
2509 tx_ring = &qdev->tx_ring[j];
2510 for (i = 0; i < tx_ring->wq_len; i++) {
2511 tx_ring_desc = &tx_ring->q[i];
2512 if (tx_ring_desc && tx_ring_desc->skb) {
2513 QPRINTK(qdev, IFDOWN, ERR,
2514 "Freeing lost SKB %p, from queue %d, index %d.\n",
2515 tx_ring_desc->skb, j,
2516 tx_ring_desc->index);
2517 ql_unmap_send(qdev, tx_ring_desc,
2518 tx_ring_desc->map_cnt);
2519 dev_kfree_skb(tx_ring_desc->skb);
2520 tx_ring_desc->skb = NULL;
2521 }
2522 }
2523 }
2524}
2525
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002526static void ql_free_mem_resources(struct ql_adapter *qdev)
2527{
2528 int i;
2529
2530 for (i = 0; i < qdev->tx_ring_count; i++)
2531 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2532 for (i = 0; i < qdev->rx_ring_count; i++)
2533 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2534 ql_free_shadow_space(qdev);
2535}
2536
2537static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2538{
2539 int i;
2540
2541 /* Allocate space for our shadow registers and such. */
2542 if (ql_alloc_shadow_space(qdev))
2543 return -ENOMEM;
2544
2545 for (i = 0; i < qdev->rx_ring_count; i++) {
2546 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2547 QPRINTK(qdev, IFUP, ERR,
2548 "RX resource allocation failed.\n");
2549 goto err_mem;
2550 }
2551 }
2552 /* Allocate tx queue resources */
2553 for (i = 0; i < qdev->tx_ring_count; i++) {
2554 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2555 QPRINTK(qdev, IFUP, ERR,
2556 "TX resource allocation failed.\n");
2557 goto err_mem;
2558 }
2559 }
2560 return 0;
2561
2562err_mem:
2563 ql_free_mem_resources(qdev);
2564 return -ENOMEM;
2565}
2566
2567/* Set up the rx ring control block and pass it to the chip.
2568 * The control block is defined as
2569 * "Completion Queue Initialization Control Block", or cqicb.
2570 */
2571static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2572{
2573 struct cqicb *cqicb = &rx_ring->cqicb;
2574 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002575 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002576 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002577 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002578 void __iomem *doorbell_area =
2579 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2580 int err = 0;
2581 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002582 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002583 __le64 *base_indirect_ptr;
2584 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002585
2586 /* Set up the shadow registers for this ring. */
2587 rx_ring->prod_idx_sh_reg = shadow_reg;
2588 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2589 shadow_reg += sizeof(u64);
2590 shadow_reg_dma += sizeof(u64);
2591 rx_ring->lbq_base_indirect = shadow_reg;
2592 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002593 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2594 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002595 rx_ring->sbq_base_indirect = shadow_reg;
2596 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2597
2598 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002599 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002600 rx_ring->cnsmr_idx = 0;
2601 rx_ring->curr_entry = rx_ring->cq_base;
2602
2603 /* PCI doorbell mem area + 0x04 for valid register */
2604 rx_ring->valid_db_reg = doorbell_area + 0x04;
2605
2606 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002607 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002608
2609 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002610 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002611
2612 memset((void *)cqicb, 0, sizeof(struct cqicb));
2613 cqicb->msix_vect = rx_ring->irq;
2614
Ron Mercer459caf52009-01-04 17:08:11 -08002615 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2616 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002617
Ron Mercer97345522009-01-09 11:31:50 +00002618 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002619
Ron Mercer97345522009-01-09 11:31:50 +00002620 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002621
2622 /*
2623 * Set up the control block load flags.
2624 */
2625 cqicb->flags = FLAGS_LC | /* Load queue base address */
2626 FLAGS_LV | /* Load MSI-X vector */
2627 FLAGS_LI; /* Load irq delay values */
2628 if (rx_ring->lbq_len) {
2629 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002630 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002631 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2632 page_entries = 0;
2633 do {
2634 *base_indirect_ptr = cpu_to_le64(tmp);
2635 tmp += DB_PAGE_SIZE;
2636 base_indirect_ptr++;
2637 page_entries++;
2638 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002639 cqicb->lbq_addr =
2640 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08002641 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2642 (u16) rx_ring->lbq_buf_size;
2643 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2644 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2645 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002646 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002647 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002648 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002649 rx_ring->lbq_clean_idx = 0;
2650 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002651 }
2652 if (rx_ring->sbq_len) {
2653 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07002654 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00002655 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2656 page_entries = 0;
2657 do {
2658 *base_indirect_ptr = cpu_to_le64(tmp);
2659 tmp += DB_PAGE_SIZE;
2660 base_indirect_ptr++;
2661 page_entries++;
2662 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00002663 cqicb->sbq_addr =
2664 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002665 cqicb->sbq_buf_size =
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002666 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
Ron Mercer459caf52009-01-04 17:08:11 -08002667 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2668 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002669 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00002670 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002671 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00002672 rx_ring->sbq_clean_idx = 0;
2673 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002674 }
2675 switch (rx_ring->type) {
2676 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002677 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2678 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2679 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002680 case RX_Q:
2681 /* Inbound completion handling rx_rings run in
2682 * separate NAPI contexts.
2683 */
2684 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2685 64);
2686 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2687 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2688 break;
2689 default:
2690 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2691 rx_ring->type);
2692 }
Ron Mercer49740972009-02-26 10:08:36 +00002693 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002694 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2695 CFG_LCQ, rx_ring->cq_id);
2696 if (err) {
2697 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2698 return err;
2699 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002700 return err;
2701}
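/* Illustrative sketch (not part of the driver): ql_start_rx_ring() above
 * does not hand the chip a single buffer-queue base address.  Instead it
 * fills an indirection list in the shadow area where each entry points at
 * one DB_PAGE_SIZE-sized page of the queue, i.e. the base DMA address
 * advanced one page per entry.  The helper below restates that fill loop;
 * its name is hypothetical.
 */
static void qlge_example_fill_indirect_list(__le64 *list, u64 base_dma,
					    int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		list[i] = cpu_to_le64(base_dma);
		base_dma += DB_PAGE_SIZE;
	}
}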
2702
2703static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704{
2705 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2706 void __iomem *doorbell_area =
2707 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2708 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2709 (tx_ring->wq_id * sizeof(u64));
2710 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2711 (tx_ring->wq_id * sizeof(u64));
2712 int err = 0;
2713
2714 /*
2715 * Assign doorbell registers for this tx_ring.
2716 */
2717 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002718 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002719 tx_ring->prod_idx = 0;
2720 /* TX PCI doorbell mem area + 0x04 */
2721 tx_ring->valid_db_reg = doorbell_area + 0x04;
2722
2723 /*
2724 * Assign shadow registers for this tx_ring.
2725 */
2726 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2727 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2728
2729 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2730 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2731 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2732 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2733 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00002734 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002735
Ron Mercer97345522009-01-09 11:31:50 +00002736 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002737
2738 ql_init_tx_ring(qdev, tx_ring);
2739
Ron Mercere3324712009-07-02 06:06:13 +00002740 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002741 (u16) tx_ring->wq_id);
2742 if (err) {
2743 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2744 return err;
2745 }
Ron Mercer49740972009-02-26 10:08:36 +00002746 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002747 return err;
2748}
2749
2750static void ql_disable_msix(struct ql_adapter *qdev)
2751{
2752 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2753 pci_disable_msix(qdev->pdev);
2754 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2755 kfree(qdev->msi_x_entry);
2756 qdev->msi_x_entry = NULL;
2757 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2758 pci_disable_msi(qdev->pdev);
2759 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2760 }
2761}
2762
Ron Mercera4ab6132009-08-27 11:02:10 +00002763/* We start by trying to get the number of vectors
2764 * stored in qdev->intr_count. If we don't get that
2765 * many then we reduce the count and try again.
2766 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002767static void ql_enable_msix(struct ql_adapter *qdev)
2768{
Ron Mercera4ab6132009-08-27 11:02:10 +00002769 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002770
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002771 /* Get the MSIX vectors. */
2772 if (irq_type == MSIX_IRQ) {
2773 /* Try to alloc space for the msix struct,
2774 * if it fails then go to MSI/legacy.
2775 */
Ron Mercera4ab6132009-08-27 11:02:10 +00002776 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002777 sizeof(struct msix_entry),
2778 GFP_KERNEL);
2779 if (!qdev->msi_x_entry) {
2780 irq_type = MSI_IRQ;
2781 goto msi;
2782 }
2783
Ron Mercera4ab6132009-08-27 11:02:10 +00002784 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002785 qdev->msi_x_entry[i].entry = i;
2786
Ron Mercera4ab6132009-08-27 11:02:10 +00002787 /* Loop to get our vectors. We start with
2788 * what we want and settle for what we get.
2789 */
2790 do {
2791 err = pci_enable_msix(qdev->pdev,
2792 qdev->msi_x_entry, qdev->intr_count);
2793 if (err > 0)
2794 qdev->intr_count = err;
2795 } while (err > 0);
2796
2797 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002798 kfree(qdev->msi_x_entry);
2799 qdev->msi_x_entry = NULL;
2800 QPRINTK(qdev, IFUP, WARNING,
2801 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00002802 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002803 irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00002804 } else if (err == 0) {
2805 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2806 QPRINTK(qdev, IFUP, INFO,
2807 "MSI-X Enabled, got %d vectors.\n",
2808 qdev->intr_count);
2809 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002810 }
2811 }
2812msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00002813 qdev->intr_count = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002814 if (irq_type == MSI_IRQ) {
2815 if (!pci_enable_msi(qdev->pdev)) {
2816 set_bit(QL_MSI_ENABLED, &qdev->flags);
2817 QPRINTK(qdev, IFUP, INFO,
2818 "Running with MSI interrupts.\n");
2819 return;
2820 }
2821 }
2822 irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002823 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2824}
2825
Ron Mercer39aa8162009-08-27 11:02:11 +00002826/* Each vector services 1 RSS ring and 1 or more
2827 * TX completion rings. This function loops through
2828 * the TX completion rings and assigns the vector that
2829 * will service it. An example would be if there are
2830 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2831 * This would mean that vector 0 would service RSS ring 0
2832 * and TX completion rings 0,1,2 and 3. Vector 1 would
2833 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2834 */
2835static void ql_set_tx_vect(struct ql_adapter *qdev)
2836{
2837 int i, j, vect;
2838 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2839
2840 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2841 /* Assign irq vectors to TX rx_rings.*/
2842 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2843 i < qdev->rx_ring_count; i++) {
2844 if (j == tx_rings_per_vector) {
2845 vect++;
2846 j = 0;
2847 }
2848 qdev->rx_ring[i].irq = vect;
2849 j++;
2850 }
2851 } else {
2852 /* For single vector all rings have an irq
2853 * of zero.
2854 */
2855 for (i = 0; i < qdev->rx_ring_count; i++)
2856 qdev->rx_ring[i].irq = 0;
2857 }
2858}
2859
2860/* Set the interrupt mask for this vector. Each vector
2861 * will service 1 RSS ring and 1 or more TX completion
2862 * rings. This function sets up a bit mask per vector
2863 * that indicates which rings it services.
2864 */
2865static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2866{
2867 int j, vect = ctx->intr;
2868 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2869
2870 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2871 /* Add the RSS ring serviced by this vector
2872 * to the mask.
2873 */
2874 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2875 /* Add the TX ring(s) serviced by this vector
2876 * to the mask. */
2877 for (j = 0; j < tx_rings_per_vector; j++) {
2878 ctx->irq_mask |=
2879 (1 << qdev->rx_ring[qdev->rss_ring_count +
2880 (vect * tx_rings_per_vector) + j].cq_id);
2881 }
2882 } else {
2883 /* For single vector we just shift each queue's
2884 * ID into the mask.
2885 */
2886 for (j = 0; j < qdev->rx_ring_count; j++)
2887 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2888 }
2889}
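/* Illustrative sketch (not part of the driver): putting ql_set_tx_vect()
 * and ql_set_irq_mask() together, with 2 vectors and 8 TX completion rings
 * tx_rings_per_vector is 4, so vector 0 masks in RSS ring 0 plus TX
 * completion rings 0-3 and vector 1 masks in RSS ring 1 plus TX completion
 * rings 4-7.  The helper below computes the index of the first TX
 * completion ring a given vector services; its name is hypothetical.
 */
static inline int qlge_example_first_tx_ring(struct ql_adapter *qdev, int vect)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	return qdev->rss_ring_count + (vect * tx_rings_per_vector);
}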
2890
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002891/*
2892 * Here we build the intr_context structures based on
2893 * our rx_ring count and intr vector count.
2894 * The intr_context structure is used to hook each vector
2895 * to possibly different handlers.
2896 */
2897static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2898{
2899 int i = 0;
2900 struct intr_context *intr_context = &qdev->intr_context[0];
2901
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002902 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2903		/* Each rx_ring has its
2904 * own intr_context since we have separate
2905 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002906 */
2907 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2908 qdev->rx_ring[i].irq = i;
2909 intr_context->intr = i;
2910 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002911 /* Set up this vector's bit-mask that indicates
2912 * which queues it services.
2913 */
2914 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002915 /*
2916			 * We set up each vector's enable/disable/read bits so
2917 * there's no bit/mask calculations in the critical path.
2918 */
2919 intr_context->intr_en_mask =
2920 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2921 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2922 | i;
2923 intr_context->intr_dis_mask =
2924 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2925 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2926 INTR_EN_IHD | i;
2927 intr_context->intr_read_mask =
2928 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2929 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2930 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00002931 if (i == 0) {
2932 /* The first vector/queue handles
2933 * broadcast/multicast, fatal errors,
2934 * and firmware events. This in addition
2935 * to normal inbound NAPI processing.
2936 */
2937 intr_context->handler = qlge_isr;
2938 sprintf(intr_context->name, "%s-rx-%d",
2939 qdev->ndev->name, i);
2940 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002941 /*
2942 * Inbound queues handle unicast frames only.
2943 */
2944 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00002945 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002946 qdev->ndev->name, i);
2947 }
2948 }
2949 } else {
2950 /*
2951 * All rx_rings use the same intr_context since
2952 * there is only one vector.
2953 */
2954 intr_context->intr = 0;
2955 intr_context->qdev = qdev;
2956 /*
2957		 * We set up each vector's enable/disable/read bits so
2958 * there's no bit/mask calculations in the critical path.
2959 */
2960 intr_context->intr_en_mask =
2961 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2962 intr_context->intr_dis_mask =
2963 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2964 INTR_EN_TYPE_DISABLE;
2965 intr_context->intr_read_mask =
2966 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2967 /*
2968 * Single interrupt means one handler for all rings.
2969 */
2970 intr_context->handler = qlge_isr;
2971 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00002972 /* Set up this vector's bit-mask that indicates
2973 * which queues it services. In this case there is
2974 * a single vector so it will service all RSS and
2975 * TX completion rings.
2976 */
2977 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002978 }
Ron Mercer39aa8162009-08-27 11:02:11 +00002979 /* Tell the TX completion rings which MSIx vector
2980 * they will be using.
2981 */
2982 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002983}
2984
2985static void ql_free_irq(struct ql_adapter *qdev)
2986{
2987 int i;
2988 struct intr_context *intr_context = &qdev->intr_context[0];
2989
2990 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2991 if (intr_context->hooked) {
2992 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2993 free_irq(qdev->msi_x_entry[i].vector,
2994 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00002995 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002996 "freeing msix interrupt %d.\n", i);
2997 } else {
2998 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00002999 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003000 "freeing msi interrupt %d.\n", i);
3001 }
3002 }
3003 }
3004 ql_disable_msix(qdev);
3005}
3006
3007static int ql_request_irq(struct ql_adapter *qdev)
3008{
3009 int i;
3010 int status = 0;
3011 struct pci_dev *pdev = qdev->pdev;
3012 struct intr_context *intr_context = &qdev->intr_context[0];
3013
3014 ql_resolve_queues_to_irqs(qdev);
3015
3016 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3017 atomic_set(&intr_context->irq_cnt, 0);
3018 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3019 status = request_irq(qdev->msi_x_entry[i].vector,
3020 intr_context->handler,
3021 0,
3022 intr_context->name,
3023 &qdev->rx_ring[i]);
3024 if (status) {
3025 QPRINTK(qdev, IFUP, ERR,
3026 "Failed request for MSIX interrupt %d.\n",
3027 i);
3028 goto err_irq;
3029 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003030 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003031 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3032 i,
3033 qdev->rx_ring[i].type ==
3034 DEFAULT_Q ? "DEFAULT_Q" : "",
3035 qdev->rx_ring[i].type ==
3036 TX_Q ? "TX_Q" : "",
3037 qdev->rx_ring[i].type ==
3038 RX_Q ? "RX_Q" : "", intr_context->name);
3039 }
3040 } else {
3041 QPRINTK(qdev, IFUP, DEBUG,
3042 "trying msi or legacy interrupts.\n");
3043 QPRINTK(qdev, IFUP, DEBUG,
3044 "%s: irq = %d.\n", __func__, pdev->irq);
3045 QPRINTK(qdev, IFUP, DEBUG,
3046 "%s: context->name = %s.\n", __func__,
3047 intr_context->name);
3048 QPRINTK(qdev, IFUP, DEBUG,
3049 "%s: dev_id = 0x%p.\n", __func__,
3050 &qdev->rx_ring[0]);
3051 status =
3052 request_irq(pdev->irq, qlge_isr,
3053 test_bit(QL_MSI_ENABLED,
3054 &qdev->
3055 flags) ? 0 : IRQF_SHARED,
3056 intr_context->name, &qdev->rx_ring[0]);
3057 if (status)
3058 goto err_irq;
3059
3060 QPRINTK(qdev, IFUP, ERR,
3061 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3062 i,
3063 qdev->rx_ring[0].type ==
3064 DEFAULT_Q ? "DEFAULT_Q" : "",
3065 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3066 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3067 intr_context->name);
3068 }
3069 intr_context->hooked = 1;
3070 }
3071 return status;
3072err_irq:
3073	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
3074 ql_free_irq(qdev);
3075 return status;
3076}
3077
3078static int ql_start_rss(struct ql_adapter *qdev)
3079{
Ron Mercer541ae282009-10-08 09:54:37 +00003080 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3081 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3082 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3083 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3084 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3085 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003086 struct ricb *ricb = &qdev->ricb;
3087 int status = 0;
3088 int i;
3089 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3090
Ron Mercere3324712009-07-02 06:06:13 +00003091 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003092
Ron Mercerb2014ff2009-08-27 11:02:09 +00003093 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003094 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003095 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3096 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003097
3098 /*
3099 * Fill out the Indirection Table.
3100 */
Ron Mercer541ae282009-10-08 09:54:37 +00003101 for (i = 0; i < 1024; i++)
3102 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003103
Ron Mercer541ae282009-10-08 09:54:37 +00003104 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3105 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003106
Ron Mercer49740972009-02-26 10:08:36 +00003107 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003108
Ron Mercere3324712009-07-02 06:06:13 +00003109 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003110 if (status) {
3111 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3112 return status;
3113 }
Ron Mercer49740972009-02-26 10:08:36 +00003114 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003115 return status;
3116}
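/* Illustrative sketch (not part of the driver): the 1024-entry indirection
 * table above is filled with "i & (rss_ring_count - 1)", which spreads the
 * hash buckets evenly over the RSS rings as long as the ring count is a
 * power of two (with 4 rings the table repeats 0,1,2,3,0,1,...).  The
 * helper below restates that mapping; its name is hypothetical.
 */
static inline u8 qlge_example_rss_bucket_to_ring(int bucket, int rss_ring_count)
{
	/* assumes rss_ring_count is a power of two */
	return bucket & (rss_ring_count - 1);
}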
3117
Ron Mercera5f59dc2009-07-02 06:06:07 +00003118static int ql_clear_routing_entries(struct ql_adapter *qdev)
3119{
3120 int i, status = 0;
3121
3122 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3123 if (status)
3124 return status;
3125 /* Clear all the entries in the routing table. */
3126 for (i = 0; i < 16; i++) {
3127 status = ql_set_routing_reg(qdev, i, 0, 0);
3128 if (status) {
3129 QPRINTK(qdev, IFUP, ERR,
3130 "Failed to init routing register for CAM "
3131 "packets.\n");
3132 break;
3133 }
3134 }
3135 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3136 return status;
3137}
3138
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003139/* Initialize the frame-to-queue routing. */
3140static int ql_route_initialize(struct ql_adapter *qdev)
3141{
3142 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003143
3144 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003145 status = ql_clear_routing_entries(qdev);
3146 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003147 return status;
3148
3149 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 if (status)
3151 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003152
3153 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3154 if (status) {
3155 QPRINTK(qdev, IFUP, ERR,
3156 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003157 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003158 }
3159 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3160 if (status) {
3161 QPRINTK(qdev, IFUP, ERR,
3162 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003163 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003164 }
3165 /* If we have more than one inbound queue, then turn on RSS in the
3166 * routing block.
3167 */
3168 if (qdev->rss_ring_count > 1) {
3169 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3170 RT_IDX_RSS_MATCH, 1);
3171 if (status) {
3172 QPRINTK(qdev, IFUP, ERR,
3173 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003174 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003175 }
3176 }
3177
3178 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3179 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003180 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003181 QPRINTK(qdev, IFUP, ERR,
3182 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003183exit:
3184 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003185 return status;
3186}
3187
Ron Mercer2ee1e272009-03-03 12:10:33 +00003188int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003189{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003190 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003191
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003192	/* Check if the link is up and use that to
3193	 * determine whether we are setting or clearing
3194 * the MAC address in the CAM.
3195 */
3196 set = ql_read32(qdev, STS);
3197 set &= qdev->port_link_up;
3198 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003199 if (status) {
3200 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3201 return status;
3202 }
3203
3204 status = ql_route_initialize(qdev);
3205 if (status)
3206 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3207
3208 return status;
3209}
3210
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003211static int ql_adapter_initialize(struct ql_adapter *qdev)
3212{
3213 u32 value, mask;
3214 int i;
3215 int status = 0;
3216
3217 /*
3218 * Set up the System register to halt on errors.
3219 */
3220 value = SYS_EFE | SYS_FAE;
3221 mask = value << 16;
3222 ql_write32(qdev, SYS, mask | value);
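	/* Note: the control registers written below carry a write mask in
	 * their upper 16 bits; only bits whose mask bit is set are actually
	 * modified, which is why each write pairs the value with a shifted
	 * copy (or explicit mask) in the high half.
	 */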
3223
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003224 /* Set the default queue, and VLAN behavior. */
3225 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3226 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003227 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3228
3229 /* Set the MPI interrupt to enabled. */
3230 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3231
3232 /* Enable the function, set pagesize, enable error checking. */
3233 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3234 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3235
3236 /* Set/clear header splitting. */
3237 mask = FSC_VM_PAGESIZE_MASK |
3238 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3239 ql_write32(qdev, FSC, mask | value);
3240
3241 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3242 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3243
3244 /* Start up the rx queues. */
3245 for (i = 0; i < qdev->rx_ring_count; i++) {
3246 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3247 if (status) {
3248 QPRINTK(qdev, IFUP, ERR,
3249 "Failed to start rx ring[%d].\n", i);
3250 return status;
3251 }
3252 }
3253
3254 /* If there is more than one inbound completion queue
3255 * then download a RICB to configure RSS.
3256 */
3257 if (qdev->rss_ring_count > 1) {
3258 status = ql_start_rss(qdev);
3259 if (status) {
3260 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3261 return status;
3262 }
3263 }
3264
3265 /* Start up the tx queues. */
3266 for (i = 0; i < qdev->tx_ring_count; i++) {
3267 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3268 if (status) {
3269 QPRINTK(qdev, IFUP, ERR,
3270 "Failed to start tx ring[%d].\n", i);
3271 return status;
3272 }
3273 }
3274
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003275 /* Initialize the port and set the max framesize. */
3276 status = qdev->nic_ops->port_initialize(qdev);
3277 if (status) {
3278 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3279 return status;
3280 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003281
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003282 /* Set up the MAC address and frame routing filter. */
3283 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003284 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003285 QPRINTK(qdev, IFUP, ERR,
3286 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003287 return status;
3288 }
3289
3290 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003291 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003292 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003293 i);
3294 napi_enable(&qdev->rx_ring[i].napi);
3295 }
3296
3297 return status;
3298}
3299
3300/* Issue soft reset to chip. */
3301static int ql_adapter_reset(struct ql_adapter *qdev)
3302{
3303 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003304 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003305 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003306
Ron Mercera5f59dc2009-07-02 06:06:07 +00003307 /* Clear all the entries in the routing table. */
3308 status = ql_clear_routing_entries(qdev);
3309 if (status) {
3310 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3311 return status;
3312 }
3313
3314 end_jiffies = jiffies +
3315 max((unsigned long)1, usecs_to_jiffies(30));
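	/* Request a function reset (the mask bit in the upper half enables
	 * the write of RST_FO_FR), then poll until the hardware clears the
	 * bit or the short timeout above expires.
	 */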
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003316 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003317
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003318 do {
3319 value = ql_read32(qdev, RST_FO);
3320 if ((value & RST_FO_FR) == 0)
3321 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003322 cpu_relax();
3323 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003324
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003325 if (value & RST_FO_FR) {
3326 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003327 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003328 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003329 }
3330
3331 return status;
3332}
3333
3334static void ql_display_dev_info(struct net_device *ndev)
3335{
3336 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3337
3338 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003339 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003340 "XG Roll = %d, XG Rev = %d.\n",
3341 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003342 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003343 qdev->chip_rev_id & 0x0000000f,
3344 qdev->chip_rev_id >> 4 & 0x0000000f,
3345 qdev->chip_rev_id >> 8 & 0x0000000f,
3346 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003347 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003348}
3349
3350static int ql_adapter_down(struct ql_adapter *qdev)
3351{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003352 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003353
Ron Mercer6a473302009-07-02 06:06:12 +00003354 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003355
Ron Mercer6497b602009-02-12 16:37:13 -08003356 /* Don't kill the reset worker thread if we
3357 * are in the process of recovery.
3358 */
3359 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3360 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003361 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3362 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003363 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003364 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003365
Ron Mercer39aa8162009-08-27 11:02:11 +00003366 for (i = 0; i < qdev->rss_ring_count; i++)
3367 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003368
3369 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3370
3371 ql_disable_interrupts(qdev);
3372
3373 ql_tx_ring_clean(qdev);
3374
Ron Mercer6b318cb2009-03-09 10:59:26 +00003375 	/* Call netif_napi_del() from a common point.
3376 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003377 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003378 netif_napi_del(&qdev->rx_ring[i].napi);
3379
Ron Mercer4545a3f2009-02-23 10:42:17 +00003380 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003381
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003382 status = ql_adapter_reset(qdev);
3383 if (status)
3384 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3385 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003386 return status;
3387}
3388
3389static int ql_adapter_up(struct ql_adapter *qdev)
3390{
3391 int err = 0;
3392
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003393 err = ql_adapter_initialize(qdev);
3394 if (err) {
3395 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003396 goto err_init;
3397 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003398 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003399 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003400 /* If the port is initialized and the
3401 	 * link is up then turn on the carrier.
3402 */
3403 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3404 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003405 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003406 ql_enable_interrupts(qdev);
3407 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003408 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003409
3410 return 0;
3411err_init:
3412 ql_adapter_reset(qdev);
3413 return err;
3414}
3415
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003416static void ql_release_adapter_resources(struct ql_adapter *qdev)
3417{
3418 ql_free_mem_resources(qdev);
3419 ql_free_irq(qdev);
3420}
3421
3422static int ql_get_adapter_resources(struct ql_adapter *qdev)
3423{
3424 int status = 0;
3425
3426 if (ql_alloc_mem_resources(qdev)) {
3427 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3428 return -ENOMEM;
3429 }
3430 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003431 return status;
3432}
3433
3434static int qlge_close(struct net_device *ndev)
3435{
3436 struct ql_adapter *qdev = netdev_priv(ndev);
3437
3438 /*
3439 * Wait for device to recover from a reset.
3440 * (Rarely happens, but possible.)
3441 */
3442 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3443 msleep(1);
3444 ql_adapter_down(qdev);
3445 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003446 return 0;
3447}
3448
3449static int ql_configure_rings(struct ql_adapter *qdev)
3450{
3451 int i;
3452 struct rx_ring *rx_ring;
3453 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003454 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003455
Ron Mercera4ab6132009-08-27 11:02:10 +00003456 /* In a perfect world we have one RSS ring for each CPU
3457 	 * and each has its own vector. To do that we ask for
3458 * cpu_cnt vectors. ql_enable_msix() will adjust the
3459 * vector count to what we actually get. We then
3460 * allocate an RSS ring for each.
3461 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003462 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003463 qdev->intr_count = cpu_cnt;
3464 ql_enable_msix(qdev);
3465 /* Adjust the RSS ring count to the actual vector count. */
3466 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003467 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003468 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
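	/* Ring layout: rx_ring[0..rss_ring_count-1] are inbound (RSS)
	 * completion queues with buffer queues attached; the remaining
	 * rx rings only service outbound (tx) completions, one per tx ring.
	 */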
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003469
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003470 for (i = 0; i < qdev->tx_ring_count; i++) {
3471 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003472 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003473 tx_ring->qdev = qdev;
3474 tx_ring->wq_id = i;
3475 tx_ring->wq_len = qdev->tx_ring_size;
3476 tx_ring->wq_size =
3477 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3478
3479 /*
3480 		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003481 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003482 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003483 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003484 }
3485
3486 for (i = 0; i < qdev->rx_ring_count; i++) {
3487 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003488 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003489 rx_ring->qdev = qdev;
3490 rx_ring->cq_id = i;
3491 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003492 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003493 /*
3494 * Inbound (RSS) queues.
3495 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003496 rx_ring->cq_len = qdev->rx_ring_size;
3497 rx_ring->cq_size =
3498 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3499 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3500 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003501 rx_ring->lbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003502 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3503 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3504 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003505 rx_ring->sbq_len * sizeof(__le64);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003506 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003507 rx_ring->type = RX_Q;
3508 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003509 /*
3510 * Outbound queue handles outbound completions only.
3511 */
3512 /* outbound cq is same size as tx_ring it services. */
3513 rx_ring->cq_len = qdev->tx_ring_size;
3514 rx_ring->cq_size =
3515 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3516 rx_ring->lbq_len = 0;
3517 rx_ring->lbq_size = 0;
3518 rx_ring->lbq_buf_size = 0;
3519 rx_ring->sbq_len = 0;
3520 rx_ring->sbq_size = 0;
3521 rx_ring->sbq_buf_size = 0;
3522 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003523 }
3524 }
3525 return 0;
3526}
3527
3528static int qlge_open(struct net_device *ndev)
3529{
3530 int err = 0;
3531 struct ql_adapter *qdev = netdev_priv(ndev);
3532
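	/* Size the rings for the available CPUs/MSI-X vectors, allocate
	 * memory and IRQs, then bring the hardware up.
	 */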
3533 err = ql_configure_rings(qdev);
3534 if (err)
3535 return err;
3536
3537 err = ql_get_adapter_resources(qdev);
3538 if (err)
3539 goto error_up;
3540
3541 err = ql_adapter_up(qdev);
3542 if (err)
3543 goto error_up;
3544
3545 return err;
3546
3547error_up:
3548 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549 return err;
3550}
3551
3552static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3553{
3554 struct ql_adapter *qdev = netdev_priv(ndev);
3555
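	/* Only the standard (1500) and jumbo (9000) MTUs are supported.
	 * Moving to jumbo kicks the MPI port-config worker so the firmware
	 * is told about the larger max frame size.
	 */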
3556 if (ndev->mtu == 1500 && new_mtu == 9000) {
3557 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003558 queue_delayed_work(qdev->workqueue,
3559 &qdev->mpi_port_cfg_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003560 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3561 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3562 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3563 (ndev->mtu == 9000 && new_mtu == 9000)) {
3564 return 0;
3565 } else
3566 return -EINVAL;
3567 ndev->mtu = new_mtu;
3568 return 0;
3569}
3570
3571static struct net_device_stats *qlge_get_stats(struct net_device
3572 *ndev)
3573{
3574 struct ql_adapter *qdev = netdev_priv(ndev);
3575 return &qdev->stats;
3576}
3577
3578static void qlge_set_multicast_list(struct net_device *ndev)
3579{
3580 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3581 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00003582 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003583
Ron Mercercc288f52009-02-23 10:42:14 +00003584 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3585 if (status)
3586 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003587 /*
3588 * Set or clear promiscuous mode if a
3589 * transition is taking place.
3590 */
3591 if (ndev->flags & IFF_PROMISC) {
3592 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3593 if (ql_set_routing_reg
3594 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3595 QPRINTK(qdev, HW, ERR,
3596 				"Failed to set promiscuous mode.\n");
3597 } else {
3598 set_bit(QL_PROMISCUOUS, &qdev->flags);
3599 }
3600 }
3601 } else {
3602 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3603 if (ql_set_routing_reg
3604 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3605 QPRINTK(qdev, HW, ERR,
3606 				"Failed to clear promiscuous mode.\n");
3607 } else {
3608 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3609 }
3610 }
3611 }
3612
3613 /*
3614 * Set or clear all multicast mode if a
3615 * transition is taking place.
3616 */
3617 if ((ndev->flags & IFF_ALLMULTI) ||
3618 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3619 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3620 if (ql_set_routing_reg
3621 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3622 QPRINTK(qdev, HW, ERR,
3623 "Failed to set all-multi mode.\n");
3624 } else {
3625 set_bit(QL_ALLMULTI, &qdev->flags);
3626 }
3627 }
3628 } else {
3629 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3630 if (ql_set_routing_reg
3631 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3632 QPRINTK(qdev, HW, ERR,
3633 "Failed to clear all-multi mode.\n");
3634 } else {
3635 clear_bit(QL_ALLMULTI, &qdev->flags);
3636 }
3637 }
3638 }
3639
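	/* Load each multicast address into a MAC filter slot and then
	 * enable the multicast-match routing rule so matching frames are
	 * accepted.
	 */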
3640 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00003641 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3642 if (status)
3643 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003644 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3645 i++, mc_ptr = mc_ptr->next)
3646 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3647 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3648 QPRINTK(qdev, HW, ERR,
3649 					"Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00003650 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003651 goto exit;
3652 }
Ron Mercercc288f52009-02-23 10:42:14 +00003653 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003654 if (ql_set_routing_reg
3655 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3656 QPRINTK(qdev, HW, ERR,
3657 "Failed to set multicast match mode.\n");
3658 } else {
3659 set_bit(QL_ALLMULTI, &qdev->flags);
3660 }
3661 }
3662exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00003663 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003664}
3665
3666static int qlge_set_mac_address(struct net_device *ndev, void *p)
3667{
3668 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3669 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00003670 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003671
3672 if (netif_running(ndev))
3673 return -EBUSY;
3674
3675 if (!is_valid_ether_addr(addr->sa_data))
3676 return -EADDRNOTAVAIL;
3677 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3678
Ron Mercercc288f52009-02-23 10:42:14 +00003679 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3680 if (status)
3681 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00003682 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3683 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00003684 if (status)
3685 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3686 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3687 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003688}
3689
3690static void qlge_tx_timeout(struct net_device *ndev)
3691{
3692 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
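	/* A transmit timeout is treated as a fatal event: queue an ASIC
	 * error so the reset worker (ql_asic_reset_work) can bring the
	 * adapter down and back up.
	 */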
Ron Mercer6497b602009-02-12 16:37:13 -08003693 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003694}
3695
3696static void ql_asic_reset_work(struct work_struct *work)
3697{
3698 struct ql_adapter *qdev =
3699 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00003700 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003701 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00003702 status = ql_adapter_down(qdev);
3703 if (status)
3704 goto error;
3705
3706 status = ql_adapter_up(qdev);
3707 if (status)
3708 goto error;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003709 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00003710 return;
3711error:
3712 QPRINTK(qdev, IFUP, ALERT,
3713 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00003714
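	/* Mark the adapter as up so qlge_close(), invoked via dev_close(),
	 * does not spin waiting for the QL_ADAPTER_UP bit before tearing
	 * the device down.
	 */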
Ron Mercerdb988122009-03-09 10:59:17 +00003715 set_bit(QL_ADAPTER_UP, &qdev->flags);
3716 dev_close(qdev->ndev);
3717 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003718}
3719
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003720static struct nic_operations qla8012_nic_ops = {
3721 .get_flash = ql_get_8012_flash_params,
3722 .port_initialize = ql_8012_port_initialize,
3723};
3724
Ron Mercercdca8d02009-03-02 08:07:31 +00003725static struct nic_operations qla8000_nic_ops = {
3726 .get_flash = ql_get_8000_flash_params,
3727 .port_initialize = ql_8000_port_initialize,
3728};
3729
Ron Mercere4552f52009-06-09 05:39:32 +00003730/* Find the pcie function number for the other NIC
3731 * on this chip. Since both NIC functions share a
3732 * common firmware we have the lowest enabled function
3733 * do any common work. Examples would be resetting
3734 * after a fatal firmware error, or doing a firmware
3735 * coredump.
3736 */
3737static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003738{
Ron Mercere4552f52009-06-09 05:39:32 +00003739 int status = 0;
3740 u32 temp;
3741 u32 nic_func1, nic_func2;
3742
3743 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3744 &temp);
3745 if (status)
3746 return status;
3747
3748 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3749 MPI_TEST_NIC_FUNC_MASK);
3750 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3751 MPI_TEST_NIC_FUNC_MASK);
3752
3753 if (qdev->func == nic_func1)
3754 qdev->alt_func = nic_func2;
3755 else if (qdev->func == nic_func2)
3756 qdev->alt_func = nic_func1;
3757 else
3758 status = -EIO;
3759
3760 return status;
3761}
3762
3763static int ql_get_board_info(struct ql_adapter *qdev)
3764{
3765 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003766 qdev->func =
3767 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00003768 if (qdev->func > 3)
3769 return -EIO;
3770
3771 status = ql_get_alt_pcie_func(qdev);
3772 if (status)
3773 return status;
3774
3775 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3776 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003777 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3778 qdev->port_link_up = STS_PL1;
3779 qdev->port_init = STS_PI1;
3780 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3781 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3782 } else {
3783 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3784 qdev->port_link_up = STS_PL0;
3785 qdev->port_init = STS_PI0;
3786 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3787 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3788 }
3789 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003790 qdev->device_id = qdev->pdev->device;
3791 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3792 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00003793 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3794 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00003795 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003796}
3797
3798static void ql_release_all(struct pci_dev *pdev)
3799{
3800 struct net_device *ndev = pci_get_drvdata(pdev);
3801 struct ql_adapter *qdev = netdev_priv(ndev);
3802
3803 if (qdev->workqueue) {
3804 destroy_workqueue(qdev->workqueue);
3805 qdev->workqueue = NULL;
3806 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003807
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003809 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003810 if (qdev->doorbell_area)
3811 iounmap(qdev->doorbell_area);
3812 pci_release_regions(pdev);
3813 pci_set_drvdata(pdev, NULL);
3814}
3815
3816static int __devinit ql_init_device(struct pci_dev *pdev,
3817 struct net_device *ndev, int cards_found)
3818{
3819 struct ql_adapter *qdev = netdev_priv(ndev);
3820 int pos, err = 0;
3821 u16 val16;
3822
Ron Mercere3324712009-07-02 06:06:13 +00003823 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003824 err = pci_enable_device(pdev);
3825 if (err) {
3826 dev_err(&pdev->dev, "PCI device enable failed.\n");
3827 return err;
3828 }
3829
Ron Mercerebd6e772009-09-29 08:39:25 +00003830 qdev->ndev = ndev;
3831 qdev->pdev = pdev;
3832 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003833 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3834 if (pos <= 0) {
3835 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3836 "aborting.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00003837 return pos;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003838 } else {
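		/* Clear No Snoop and enable correctable, non-fatal, fatal
		 * and unsupported-request error reporting in the PCIe
		 * Device Control register.
		 */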
3839 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3840 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3841 val16 |= (PCI_EXP_DEVCTL_CERE |
3842 PCI_EXP_DEVCTL_NFERE |
3843 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3844 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3845 }
3846
3847 err = pci_request_regions(pdev, DRV_NAME);
3848 if (err) {
3849 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00003850 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003851 }
3852
3853 pci_set_master(pdev);
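	/* Prefer 64-bit DMA; fall back to a 32-bit mask if the platform
	 * cannot support it.
	 */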
Yang Hongyang6a355282009-04-06 19:01:13 -07003854 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003855 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07003856 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003857 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07003858 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003859 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07003860 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003861 }
3862
3863 if (err) {
3864 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3865 goto err_out;
3866 }
3867
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003868 qdev->reg_base =
3869 ioremap_nocache(pci_resource_start(pdev, 1),
3870 pci_resource_len(pdev, 1));
3871 if (!qdev->reg_base) {
3872 dev_err(&pdev->dev, "Register mapping failed.\n");
3873 err = -ENOMEM;
3874 goto err_out;
3875 }
3876
3877 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3878 qdev->doorbell_area =
3879 ioremap_nocache(pci_resource_start(pdev, 3),
3880 pci_resource_len(pdev, 3));
3881 if (!qdev->doorbell_area) {
3882 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3883 err = -ENOMEM;
3884 goto err_out;
3885 }
3886
Ron Mercere4552f52009-06-09 05:39:32 +00003887 err = ql_get_board_info(qdev);
3888 if (err) {
3889 dev_err(&pdev->dev, "Register access failed.\n");
3890 err = -EIO;
3891 goto err_out;
3892 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003893 qdev->msg_enable = netif_msg_init(debug, default_msg);
3894 spin_lock_init(&qdev->hw_lock);
3895 spin_lock_init(&qdev->stats_lock);
3896
3897 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003898 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003899 if (err) {
3900 dev_err(&pdev->dev, "Invalid FLASH.\n");
3901 goto err_out;
3902 }
3903
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003904 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3905
3906 /* Set up the default ring sizes. */
3907 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3908 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3909
3910 /* Set up the coalescing parameters. */
3911 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3912 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3913 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3914 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3915
3916 /*
3917 * Set up the operating parameters.
3918 */
3919 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003920 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3921 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3922 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3923 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003924 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003925 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003926 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003927
3928 if (!cards_found) {
3929 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3930 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3931 DRV_NAME, DRV_VERSION);
3932 }
3933 return 0;
3934err_out:
3935 ql_release_all(pdev);
3936 pci_disable_device(pdev);
3937 return err;
3938}
3939
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003940
3941static const struct net_device_ops qlge_netdev_ops = {
3942 .ndo_open = qlge_open,
3943 .ndo_stop = qlge_close,
3944 .ndo_start_xmit = qlge_send,
3945 .ndo_change_mtu = qlge_change_mtu,
3946 .ndo_get_stats = qlge_get_stats,
3947 .ndo_set_multicast_list = qlge_set_multicast_list,
3948 .ndo_set_mac_address = qlge_set_mac_address,
3949 .ndo_validate_addr = eth_validate_addr,
3950 .ndo_tx_timeout = qlge_tx_timeout,
3951 .ndo_vlan_rx_register = ql_vlan_rx_register,
3952 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3953 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3954};
3955
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003956static int __devinit qlge_probe(struct pci_dev *pdev,
3957 const struct pci_device_id *pci_entry)
3958{
3959 struct net_device *ndev = NULL;
3960 struct ql_adapter *qdev = NULL;
3961 static int cards_found = 0;
3962 int err = 0;
3963
Ron Mercer1e213302009-03-09 10:59:21 +00003964 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
3965 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003966 if (!ndev)
3967 return -ENOMEM;
3968
3969 err = ql_init_device(pdev, ndev, cards_found);
3970 if (err < 0) {
3971 free_netdev(ndev);
3972 return err;
3973 }
3974
3975 qdev = netdev_priv(ndev);
3976 SET_NETDEV_DEV(ndev, &pdev->dev);
3977 ndev->features = (0
3978 | NETIF_F_IP_CSUM
3979 | NETIF_F_SG
3980 | NETIF_F_TSO
3981 | NETIF_F_TSO6
3982 | NETIF_F_TSO_ECN
3983 | NETIF_F_HW_VLAN_TX
3984 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00003985 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003986
3987 if (test_bit(QL_DMA64, &qdev->flags))
3988 ndev->features |= NETIF_F_HIGHDMA;
3989
3990 /*
3991 * Set up net_device structure.
3992 */
3993 ndev->tx_queue_len = qdev->tx_ring_size;
3994 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003995
3996 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003997 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003998 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08003999
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004000 err = register_netdev(ndev);
4001 if (err) {
4002 dev_err(&pdev->dev, "net device registration failed.\n");
4003 ql_release_all(pdev);
4004 pci_disable_device(pdev);
4005 return err;
4006 }
Ron Mercer6a473302009-07-02 06:06:12 +00004007 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004008 ql_display_dev_info(ndev);
4009 cards_found++;
4010 return 0;
4011}
4012
4013static void __devexit qlge_remove(struct pci_dev *pdev)
4014{
4015 struct net_device *ndev = pci_get_drvdata(pdev);
4016 unregister_netdev(ndev);
4017 ql_release_all(pdev);
4018 pci_disable_device(pdev);
4019 free_netdev(ndev);
4020}
4021
4022/*
4023 * This callback is called by the PCI subsystem whenever
4024 * a PCI bus error is detected.
4025 */
4026static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4027 enum pci_channel_state state)
4028{
4029 struct net_device *ndev = pci_get_drvdata(pdev);
4030 struct ql_adapter *qdev = netdev_priv(ndev);
4031
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004032 netif_device_detach(ndev);
4033
4034 if (state == pci_channel_io_perm_failure)
4035 return PCI_ERS_RESULT_DISCONNECT;
4036
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004037 if (netif_running(ndev))
4038 ql_adapter_down(qdev);
4039
4040 pci_disable_device(pdev);
4041
4042 /* Request a slot reset. */
4043 return PCI_ERS_RESULT_NEED_RESET;
4044}
4045
4046/*
4047 * This callback is called after the PCI bus has been reset.
4048 * Basically, this tries to restart the card from scratch.
4049 * This is a shortened version of the device probe/discovery code,
4050 * it resembles the first half of the probe() routine.
4051 */
4052static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4053{
4054 struct net_device *ndev = pci_get_drvdata(pdev);
4055 struct ql_adapter *qdev = netdev_priv(ndev);
4056
4057 if (pci_enable_device(pdev)) {
4058 QPRINTK(qdev, IFUP, ERR,
4059 "Cannot re-enable PCI device after reset.\n");
4060 return PCI_ERS_RESULT_DISCONNECT;
4061 }
4062
4063 pci_set_master(pdev);
4064
4065 netif_carrier_off(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004066 ql_adapter_reset(qdev);
4067
4068 /* Make sure the EEPROM is good */
4069 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4070
4071 if (!is_valid_ether_addr(ndev->perm_addr)) {
4072 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4073 return PCI_ERS_RESULT_DISCONNECT;
4074 }
4075
4076 return PCI_ERS_RESULT_RECOVERED;
4077}
4078
4079static void qlge_io_resume(struct pci_dev *pdev)
4080{
4081 struct net_device *ndev = pci_get_drvdata(pdev);
4082 struct ql_adapter *qdev = netdev_priv(ndev);
4083
4084 pci_set_master(pdev);
4085
4086 if (netif_running(ndev)) {
4087 if (ql_adapter_up(qdev)) {
4088 QPRINTK(qdev, IFUP, ERR,
4089 "Device initialization failed after reset.\n");
4090 return;
4091 }
4092 }
4093
4094 netif_device_attach(ndev);
4095}
4096
4097static struct pci_error_handlers qlge_err_handler = {
4098 .error_detected = qlge_io_error_detected,
4099 .slot_reset = qlge_io_slot_reset,
4100 .resume = qlge_io_resume,
4101};
4102
4103static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4104{
4105 struct net_device *ndev = pci_get_drvdata(pdev);
4106 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004107 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004108
4109 netif_device_detach(ndev);
4110
4111 if (netif_running(ndev)) {
4112 err = ql_adapter_down(qdev);
4113 		if (err)
4114 return err;
4115 }
4116
4117 err = pci_save_state(pdev);
4118 if (err)
4119 return err;
4120
4121 pci_disable_device(pdev);
4122
4123 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4124
4125 return 0;
4126}
4127
David S. Miller04da2cf2008-09-19 16:14:24 -07004128#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129static int qlge_resume(struct pci_dev *pdev)
4130{
4131 struct net_device *ndev = pci_get_drvdata(pdev);
4132 struct ql_adapter *qdev = netdev_priv(ndev);
4133 int err;
4134
4135 pci_set_power_state(pdev, PCI_D0);
4136 pci_restore_state(pdev);
4137 err = pci_enable_device(pdev);
4138 if (err) {
4139 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4140 return err;
4141 }
4142 pci_set_master(pdev);
4143
4144 pci_enable_wake(pdev, PCI_D3hot, 0);
4145 pci_enable_wake(pdev, PCI_D3cold, 0);
4146
4147 if (netif_running(ndev)) {
4148 err = ql_adapter_up(qdev);
4149 if (err)
4150 return err;
4151 }
4152
4153 netif_device_attach(ndev);
4154
4155 return 0;
4156}
David S. Miller04da2cf2008-09-19 16:14:24 -07004157#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004158
4159static void qlge_shutdown(struct pci_dev *pdev)
4160{
4161 qlge_suspend(pdev, PMSG_SUSPEND);
4162}
4163
4164static struct pci_driver qlge_driver = {
4165 .name = DRV_NAME,
4166 .id_table = qlge_pci_tbl,
4167 .probe = qlge_probe,
4168 .remove = __devexit_p(qlge_remove),
4169#ifdef CONFIG_PM
4170 .suspend = qlge_suspend,
4171 .resume = qlge_resume,
4172#endif
4173 .shutdown = qlge_shutdown,
4174 .err_handler = &qlge_err_handler
4175};
4176
4177static int __init qlge_init_module(void)
4178{
4179 return pci_register_driver(&qlge_driver);
4180}
4181
4182static void __exit qlge_exit(void)
4183{
4184 pci_unregister_driver(&qlge_driver);
4185}
4186
4187module_init(qlge_init_module);
4188module_exit(qlge_exit);