/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
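
/* Usage sketch (illustrative only, not part of the driver): callers pair
 * ql_sem_spinlock() with ql_sem_unlock() around access to a shared
 * resource, e.g.:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... touch the shared flash resource ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_get_8012_flash_params() later in this file follows exactly this
 * pattern.
 */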

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
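
/* Typical call shape (illustrative; ql_read_flash_word() later in this
 * file does exactly this): wait for the flash address register to come
 * ready, failing fast if its error bit is set:
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR,
 *				 FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *	if (status)
 *		return status;
 */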

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		/* Returning here directly would leak the DMA mapping
		 * made above, so fall through to the unmap.
		 */
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
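
/* Illustrative call (the queue-start paths later in this file use this
 * same shape; the cqicb names are shown here only as an example):
 * download a completion queue init block for a given rx ring:
 *
 *	status = ql_write_cfg(qdev, rx_ring->cqicb,
 *			      sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */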

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
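
/* Illustrative use (the filter setup paths elsewhere in the driver
 * follow this pattern): route broadcast frames to the default queue:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * Passing enable == 0 clears the E-bit, disabling the routing entry.
 */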

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
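
/* Worked example of the chunking above (illustrative numbers only):
 * with PAGE_SIZE = 4096, lbq_buf_order = 1 and lbq_buf_size = 2048,
 * ql_lbq_block_size() is 8192, so one master page block is carved into
 * four chunks at offsets 0, 2048, 4096 and 6144.  Only the last chunk
 * sets last_flag, and ql_get_curr_lchunk() unmaps the block when that
 * chunk is consumed.
 */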

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
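
/* Worked example for the OAL case above (illustrative): with 10 frags,
 * iocb->seg[0] holds skb->data, segs 1-6 hold frags 0-5, and seg[7]
 * points at the OAL.  The OAL descriptor's length field is then
 * 4 * sizeof(struct tx_buf_desc) with TX_DESC_C set, since
 * frag_cnt - frag_idx = 10 - 6 = 4 frags remain for the OAL.
 */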
1468
Ron Mercer4f848c02010-01-02 10:37:43 +00001469/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001470static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 u32 length,
1474 u16 vlan_id)
1475{
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct skb_frag_struct *rx_frag;
1479 int nr_frags;
1480 struct napi_struct *napi = &rx_ring->napi;
1481
1482 napi->dev = qdev->ndev;
1483
1484 skb = napi_get_frags(napi);
1485 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001486 netif_err(qdev, drv, qdev->ndev,
1487 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001488 rx_ring->rx_dropped++;
1489 put_page(lbq_desc->p.pg_chunk.page);
1490 return;
1491 }
1492 prefetch(lbq_desc->p.pg_chunk.va);
1493 rx_frag = skb_shinfo(skb)->frags;
1494 nr_frags = skb_shinfo(skb)->nr_frags;
1495 rx_frag += nr_frags;
1496 rx_frag->page = lbq_desc->p.pg_chunk.page;
1497 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1498 rx_frag->size = length;
1499
1500 skb->len += length;
1501 skb->data_len += length;
1502 skb->truesize += length;
1503 skb_shinfo(skb)->nr_frags++;
1504
1505 rx_ring->rx_packets++;
1506 rx_ring->rx_bytes += length;
1507 skb->ip_summed = CHECKSUM_UNNECESSARY;
1508 skb_record_rx_queue(skb, rx_ring->cq_id);
1509 if (qdev->vlgrp && (vlan_id != 0xffff))
1510 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1511 else
1512 napi_gro_frags(napi);
1513}
1514
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
			   length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

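/* Small frames land in a pre-mapped small buffer.  The handler below
 * copies them into a freshly allocated skb so the original buffer can
 * be recycled immediately; at these sizes a copy is typically cheaper
 * than handing the mapped buffer up the stack.
 */
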
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

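/* ql_build_rx_skb() below distinguishes the ways the chip may have
 * placed a frame, signalled by the DS/DL/HS/HV flags in the
 * completion: data appended to the header small buffer, data in its
 * own small buffer, data in a single large page chunk (chained or
 * copied), or data spread over a chain of large chunks.
 */
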
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

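/* Throughout the handlers above and below, rx_errors counts frames
 * the hardware flagged as bad, while rx_dropped counts frames we had
 * to discard for lack of resources (no skb, oversize frame, etc.).
 */
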
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

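/* Note the dispatch order below: the GRO page path is only taken for
 * large-buffer completions whose checksum the hardware has already
 * verified; anything else in a page chunk falls through to the plain
 * page or split handlers.
 */
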
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

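/* TX completions also arrive on completion queues (one outbound
 * completion ring per TX queue); ql_clean_outbound_rx_ring() below
 * feeds them through this handler to unmap and free the sent skbs.
 */
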
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netif_err(qdev, link, qdev->ndev,
			  "Multiple CAM hits lookup occurred.\n");
		netif_err(qdev, drv, qdev->ndev,
			  "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netif_err(qdev, rx_err, qdev->ndev,
			  "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			  ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	struct tx_ring *tx_ring;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

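/* Both ring cleaners work the same way: the chip DMAs its producer
 * index into a shadow register, and we walk curr_entry forward
 * (ql_update_cq) until our consumer index catches up, then publish
 * the new consumer index back to the chip with ql_write_cq_idx().
 */
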
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

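/* Each MSI-X vector runs one NAPI instance.  A vector's poll routine
 * services any TX completion rings whose bit is set in its
 * ctx->irq_mask as well as its own RSS ring, so TX reclaim happens
 * even when only RX traffic raised the interrupt.
 */
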
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netif_err(qdev, intr, qdev->ndev,
			  "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netif_err(qdev, intr, qdev->ndev,
			  "Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

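/* For TSO the stack hands us a GSO super-frame.  ql_tso() below fills
 * in the TSO IOCB and seeds the TCP checksum field with the
 * pseudo-header sum (length omitted) so the chip can finish the
 * checksum for each segment it cuts.
 */
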
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}

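/* The shadow register areas below give the chip a page of host
 * memory it can DMA ring indices into (e.g. the completion queue
 * producer indices read via ql_read_sh_reg()), presumably so the
 * driver can poll them without an MMIO read.
 */
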
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

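/* Large buffers are carved as chunks out of bigger page allocations;
 * only the descriptor holding the last chunk of a block carries the
 * DMA mapping (pg_chunk.last_flag), so teardown below unmaps once per
 * block but drops a page reference for every chunk.
 */
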
2717
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002718static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002719{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002720 struct bq_desc *lbq_desc;
2721
Ron Mercer7c734352009-10-19 03:32:19 +00002722 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002723
Ron Mercer7c734352009-10-19 03:32:19 +00002724 curr_idx = rx_ring->lbq_curr_idx;
2725 clean_idx = rx_ring->lbq_clean_idx;
2726 while (curr_idx != clean_idx) {
2727 lbq_desc = &rx_ring->lbq[curr_idx];
2728
2729 if (lbq_desc->p.pg_chunk.last_flag) {
2730 pci_unmap_page(qdev->pdev,
2731 lbq_desc->p.pg_chunk.map,
2732 ql_lbq_block_size(qdev),
2733 PCI_DMA_FROMDEVICE);
2734 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002735 }
Ron Mercer7c734352009-10-19 03:32:19 +00002736
2737 put_page(lbq_desc->p.pg_chunk.page);
2738 lbq_desc->p.pg_chunk.page = NULL;
2739
2740 if (++curr_idx == rx_ring->lbq_len)
2741 curr_idx = 0;
2742
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002743 }
2744}
2745
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

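/* Each buffer queue is an array of little-endian 64-bit bus addresses
 * for the chip to consume; the init routines below point every
 * software-side bq_desc at its slot in that array so a refill only
 * has to write the new mapping through the desc's addr pointer.
 */
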
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
		pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				     &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					     &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
			kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
				GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					     &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
			kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
				GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

2950static void ql_tx_ring_clean(struct ql_adapter *qdev)
2951{
2952 struct tx_ring *tx_ring;
2953 struct tx_ring_desc *tx_ring_desc;
2954 int i, j;
2955
2956 /*
2957 * Loop through all queues and free
2958 * any resources.
2959 */
2960 for (j = 0; j < qdev->tx_ring_count; j++) {
2961 tx_ring = &qdev->tx_ring[j];
2962 for (i = 0; i < tx_ring->wq_len; i++) {
2963 tx_ring_desc = &tx_ring->q[i];
2964 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002965 netif_err(qdev, ifdown, qdev->ndev,
2966 "Freeing lost SKB %p, from queue %d, index %d.\n",
2967 tx_ring_desc->skb, j,
2968 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002969 ql_unmap_send(qdev, tx_ring_desc,
2970 tx_ring_desc->map_cnt);
2971 dev_kfree_skb(tx_ring_desc->skb);
2972 tx_ring_desc->skb = NULL;
2973 }
2974 }
2975 }
2976}
2977
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002978static void ql_free_mem_resources(struct ql_adapter *qdev)
2979{
2980 int i;
2981
2982 for (i = 0; i < qdev->tx_ring_count; i++)
2983 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2984 for (i = 0; i < qdev->rx_ring_count; i++)
2985 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2986 ql_free_shadow_space(qdev);
2987}
2988
2989static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2990{
2991 int i;
2992
2993 /* Allocate space for our shadow registers and such. */
2994 if (ql_alloc_shadow_space(qdev))
2995 return -ENOMEM;
2996
2997 for (i = 0; i < qdev->rx_ring_count; i++) {
2998 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002999 netif_err(qdev, ifup, qdev->ndev,
3000 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003001 goto err_mem;
3002 }
3003 }
3004 /* Allocate tx queue resources */
3005 for (i = 0; i < qdev->tx_ring_count; i++) {
3006 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003007 netif_err(qdev, ifup, qdev->ndev,
3008 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003009 goto err_mem;
3010 }
3011 }
3012 return 0;
3013
3014err_mem:
3015 ql_free_mem_resources(qdev);
3016 return -ENOMEM;
3017}
3018
3019/* Set up the rx ring control block and pass it to the chip.
3020 * The control block is defined as
3021 * "Completion Queue Initialization Control Block", or cqicb.
3022 */
3023static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3024{
3025 struct cqicb *cqicb = &rx_ring->cqicb;
3026 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003027 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003028 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003029 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003030 void __iomem *doorbell_area =
3031 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3032 int err = 0;
3033 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003034 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003035 __le64 *base_indirect_ptr;
3036 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003037
3038 /* Set up the shadow registers for this ring. */
3039 rx_ring->prod_idx_sh_reg = shadow_reg;
3040 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003041 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003042 shadow_reg += sizeof(u64);
3043 shadow_reg_dma += sizeof(u64);
3044 rx_ring->lbq_base_indirect = shadow_reg;
3045 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003046 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3047 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003048 rx_ring->sbq_base_indirect = shadow_reg;
3049 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3050
3051 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003052 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003053 rx_ring->cnsmr_idx = 0;
3054 rx_ring->curr_entry = rx_ring->cq_base;
3055
3056 /* PCI doorbell mem area + 0x04 for valid register */
3057 rx_ring->valid_db_reg = doorbell_area + 0x04;
3058
3059 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003060 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003061
3062 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003063 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003064
3065 memset((void *)cqicb, 0, sizeof(struct cqicb));
3066 cqicb->msix_vect = rx_ring->irq;
3067
Ron Mercer459caf52009-01-04 17:08:11 -08003068 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3069 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003070
Ron Mercer97345522009-01-09 11:31:50 +00003071 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003072
Ron Mercer97345522009-01-09 11:31:50 +00003073 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003074
3075 /*
3076 * Set up the control block load flags.
3077 */
3078 cqicb->flags = FLAGS_LC | /* Load queue base address */
3079 FLAGS_LV | /* Load MSI-X vector */
3080 FLAGS_LI; /* Load irq delay values */
3081 if (rx_ring->lbq_len) {
3082 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003083 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003084 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3085 page_entries = 0;
3086 do {
3087 *base_indirect_ptr = cpu_to_le64(tmp);
3088 tmp += DB_PAGE_SIZE;
3089 base_indirect_ptr++;
3090 page_entries++;
3091 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003092 cqicb->lbq_addr =
3093 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003094 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3095 (u16) rx_ring->lbq_buf_size;
3096 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3097 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3098 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003099 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003100 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003101 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003102 rx_ring->lbq_clean_idx = 0;
3103 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003104 }
3105 if (rx_ring->sbq_len) {
3106 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003107 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003108 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3109 page_entries = 0;
3110 do {
3111 *base_indirect_ptr = cpu_to_le64(tmp);
3112 tmp += DB_PAGE_SIZE;
3113 base_indirect_ptr++;
3114 page_entries++;
3115 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003116 cqicb->sbq_addr =
3117 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003118 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003119 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003120 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3121 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003122 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003123 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003124 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003125 rx_ring->sbq_clean_idx = 0;
3126 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003127 }
3128 switch (rx_ring->type) {
3129 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003130 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3131 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3132 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003133 case RX_Q:
3134 /* Inbound completion handling rx_rings run in
3135 * separate NAPI contexts.
3136 */
3137 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3138 64);
3139 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3140 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3141 break;
3142 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003143 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003145 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003146 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3147 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003148 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3149 CFG_LCQ, rx_ring->cq_id);
3150 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003151 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003152 return err;
3153 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003154 return err;
3155}
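
/* A minimal sketch (not driver code) of the indirection sizing used in
 * ql_start_rx_ring() above: a buffer queue of q_len 64-bit entries spans
 * one or more DB_PAGE_SIZE pages, and the shadow area holds one
 * little-endian page address per page.  This assumes the driver's
 * MAX_DB_PAGES_PER_BQ() macro rounds up as shown.
 */
static inline int ql_example_db_pages_per_bq(int q_len)
{
	int bytes = q_len * sizeof(__le64);

	return (bytes + DB_PAGE_SIZE - 1) / DB_PAGE_SIZE;
}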
3156
3157static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3158{
3159 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3160 void __iomem *doorbell_area =
3161 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3162 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3163 (tx_ring->wq_id * sizeof(u64));
3164 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3165 (tx_ring->wq_id * sizeof(u64));
3166 int err = 0;
3167
3168 /*
3169 * Assign doorbell registers for this tx_ring.
3170 */
3171 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003172 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003173 tx_ring->prod_idx = 0;
3174 /* TX PCI doorbell mem area + 0x04 */
3175 tx_ring->valid_db_reg = doorbell_area + 0x04;
3176
3177 /*
3178 * Assign shadow registers for this tx_ring.
3179 */
3180 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3181 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3182
3183 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3184 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3185 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3186 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3187 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003188 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003189
Ron Mercer97345522009-01-09 11:31:50 +00003190 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003191
3192 ql_init_tx_ring(qdev, tx_ring);
3193
Ron Mercere3324712009-07-02 06:06:13 +00003194 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003195 (u16) tx_ring->wq_id);
3196 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003197 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198 return err;
3199 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003200 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3201 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003202 return err;
3203}
3204
3205static void ql_disable_msix(struct ql_adapter *qdev)
3206{
3207 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3208 pci_disable_msix(qdev->pdev);
3209 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3210 kfree(qdev->msi_x_entry);
3211 qdev->msi_x_entry = NULL;
3212 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3213 pci_disable_msi(qdev->pdev);
3214 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3215 }
3216}
3217
Ron Mercera4ab6132009-08-27 11:02:10 +00003218/* We start by trying to get the number of vectors
3219 * stored in qdev->intr_count. If we don't get that
3220 * many then we reduce the count and try again.
3221 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003222static void ql_enable_msix(struct ql_adapter *qdev)
3223{
Ron Mercera4ab6132009-08-27 11:02:10 +00003224 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003225
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003226 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003227 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003228 /* Try to alloc space for the msix struct,
3229 * if it fails then go to MSI/legacy.
3230 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003231 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003232 sizeof(struct msix_entry),
3233 GFP_KERNEL);
3234 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003235 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003236 goto msi;
3237 }
3238
Ron Mercera4ab6132009-08-27 11:02:10 +00003239 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003240 qdev->msi_x_entry[i].entry = i;
3241
Ron Mercera4ab6132009-08-27 11:02:10 +00003242 /* Loop to get our vectors. We start with
3243 * what we want and settle for what we get.
3244 */
3245 do {
3246 err = pci_enable_msix(qdev->pdev,
3247 qdev->msi_x_entry, qdev->intr_count);
3248 if (err > 0)
3249 qdev->intr_count = err;
3250 } while (err > 0);
3251
3252 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003253 kfree(qdev->msi_x_entry);
3254 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003255 netif_warn(qdev, ifup, qdev->ndev,
3256 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003257 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003258 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003259 } else if (err == 0) {
3260 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003261 netif_info(qdev, ifup, qdev->ndev,
3262 "MSI-X Enabled, got %d vectors.\n",
3263 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003264 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003265 }
3266 }
3267msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003268 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003269 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003270 if (!pci_enable_msi(qdev->pdev)) {
3271 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003272 netif_info(qdev, ifup, qdev->ndev,
3273 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003274 return;
3275 }
3276 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003277 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003278 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3279 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003280}
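
/* Hedged sketch of the negotiate-down loop in ql_enable_msix() above,
 * assuming the older pci_enable_msix() semantics where a positive return
 * value is the number of vectors the platform can actually grant.
 * Returns the vector count obtained, or a negative errno.
 */
static int ql_example_negotiate_msix(struct pci_dev *pdev,
				     struct msix_entry *entries, int want)
{
	int err;

	do {
		err = pci_enable_msix(pdev, entries, want);
		if (err > 0)
			want = err;	/* settle for what we can get */
	} while (err > 0);

	return err < 0 ? err : want;
}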
3281
Ron Mercer39aa8162009-08-27 11:02:11 +00003282/* Each vector services 1 RSS ring and 1 or more
3283 * TX completion rings. This function loops through
3284 * the TX completion rings and assigns the vector that
3285 * will service it. An example would be if there are
3286 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3287 * This would mean that vector 0 would service RSS ring 0
3288 * and TX completion rings 0,1,2 and 3. Vector 1 would
3289 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3290 */
3291static void ql_set_tx_vect(struct ql_adapter *qdev)
3292{
3293 int i, j, vect;
3294 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3295
3296 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3297 /* Assign irq vectors to TX rx_rings.*/
3298 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3299 i < qdev->rx_ring_count; i++) {
3300 if (j == tx_rings_per_vector) {
3301 vect++;
3302 j = 0;
3303 }
3304 qdev->rx_ring[i].irq = vect;
3305 j++;
3306 }
3307 } else {
3308 /* For a single vector all rings have an irq
3309 * of zero.
3310 */
3311 for (i = 0; i < qdev->rx_ring_count; i++)
3312 qdev->rx_ring[i].irq = 0;
3313 }
3314}
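
/* Illustrative helper (a sketch, not driver code): given the mapping
 * ql_set_tx_vect() builds above, the vector servicing TX completion
 * ring n is simply n divided by tx_rings_per_vector in the MSI-X case.
 */
static inline u32 ql_example_tx_vect(struct ql_adapter *qdev, u32 tx_idx)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	return tx_idx / tx_rings_per_vector;
}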
3315
3316/* Set the interrupt mask for this vector. Each vector
3317 * will service 1 RSS ring and 1 or more TX completion
3318 * rings. This function sets up a bit mask per vector
3319 * that indicates which rings it services.
3320 */
3321static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3322{
3323 int j, vect = ctx->intr;
3324 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3325
3326 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327 /* Add the RSS ring serviced by this vector
3328 * to the mask.
3329 */
3330 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3331 /* Add the TX ring(s) serviced by this vector
3332 * to the mask. */
3333 for (j = 0; j < tx_rings_per_vector; j++) {
3334 ctx->irq_mask |=
3335 (1 << qdev->rx_ring[qdev->rss_ring_count +
3336 (vect * tx_rings_per_vector) + j].cq_id);
3337 }
3338 } else {
3339 /* For a single vector we just set each queue's
3340 * bit in the mask.
3341 */
3342 for (j = 0; j < qdev->rx_ring_count; j++)
3343 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3344 }
3345}
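
/* Worked example for ql_set_irq_mask(): with 2 MSI-X vectors and 8 TX
 * completion rings (tx_rings_per_vector = 4), vector 0 covers cq_ids 0
 * (its RSS ring) and 2..5 (TX), so irq_mask = 0x3d; vector 1 covers
 * cq_ids 1 and 6..9, so irq_mask = 0x3c2.  This assumes cq_id equals
 * the ring index, as ql_configure_rings() sets up below.
 */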
3346
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003347/*
3348 * Here we build the intr_context structures based on
3349 * our rx_ring count and intr vector count.
3350 * The intr_context structure is used to hook each vector
3351 * to possibly different handlers.
3352 */
3353static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3354{
3355 int i = 0;
3356 struct intr_context *intr_context = &qdev->intr_context[0];
3357
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003358 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359 /* Each rx_ring has its
3360 * own intr_context since we have separate
3361 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003362 */
3363 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3364 qdev->rx_ring[i].irq = i;
3365 intr_context->intr = i;
3366 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003367 /* Set up this vector's bit-mask that indicates
3368 * which queues it services.
3369 */
3370 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003371 /*
3372 * We set up each vector's enable/disable/read bits so
3373 * there are no bit/mask calculations in the critical path.
3374 */
3375 intr_context->intr_en_mask =
3376 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3377 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3378 | i;
3379 intr_context->intr_dis_mask =
3380 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3382 INTR_EN_IHD | i;
3383 intr_context->intr_read_mask =
3384 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3385 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3386 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003387 if (i == 0) {
3388 /* The first vector/queue handles
3389 * broadcast/multicast, fatal errors,
3390 * and firmware events. This in addition
3391 * to normal inbound NAPI processing.
3392 */
3393 intr_context->handler = qlge_isr;
3394 sprintf(intr_context->name, "%s-rx-%d",
3395 qdev->ndev->name, i);
3396 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003397 /*
3398 * Inbound queues handle unicast frames only.
3399 */
3400 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003401 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003402 qdev->ndev->name, i);
3403 }
3404 }
3405 } else {
3406 /*
3407 * All rx_rings use the same intr_context since
3408 * there is only one vector.
3409 */
3410 intr_context->intr = 0;
3411 intr_context->qdev = qdev;
3412 /*
3413 * We set up each vector's enable/disable/read bits so
3414 * there are no bit/mask calculations in the critical path.
3415 */
3416 intr_context->intr_en_mask =
3417 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3418 intr_context->intr_dis_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420 INTR_EN_TYPE_DISABLE;
3421 intr_context->intr_read_mask =
3422 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3423 /*
3424 * Single interrupt means one handler for all rings.
3425 */
3426 intr_context->handler = qlge_isr;
3427 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003428 /* Set up this vector's bit-mask that indicates
3429 * which queues it services. In this case there is
3430 * a single vector so it will service all RSS and
3431 * TX completion rings.
3432 */
3433 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003434 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003435 /* Tell the TX completion rings which MSIx vector
3436 * they will be using.
3437 */
3438 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003439}
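
/* With the masks built in ql_resolve_queues_to_irqs(), enabling a
 * vector is one register write with no hot-path arithmetic.  A hedged
 * sketch of that idea; ql_write32() is the driver's own, and INTR_EN is
 * assumed to be the interrupt-enable register from its register map.
 */
static inline void ql_example_vector_enable(struct ql_adapter *qdev,
					    struct intr_context *ctx)
{
	ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}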
3440
3441static void ql_free_irq(struct ql_adapter *qdev)
3442{
3443 int i;
3444 struct intr_context *intr_context = &qdev->intr_context[0];
3445
3446 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3447 if (intr_context->hooked) {
3448 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3449 free_irq(qdev->msi_x_entry[i].vector,
3450 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003451 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3452 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003453 } else {
3454 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003455 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3456 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003457 }
3458 }
3459 }
3460 ql_disable_msix(qdev);
3461}
3462
3463static int ql_request_irq(struct ql_adapter *qdev)
3464{
3465 int i;
3466 int status = 0;
3467 struct pci_dev *pdev = qdev->pdev;
3468 struct intr_context *intr_context = &qdev->intr_context[0];
3469
3470 ql_resolve_queues_to_irqs(qdev);
3471
3472 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3473 atomic_set(&intr_context->irq_cnt, 0);
3474 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3475 status = request_irq(qdev->msi_x_entry[i].vector,
3476 intr_context->handler,
3477 0,
3478 intr_context->name,
3479 &qdev->rx_ring[i]);
3480 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003481 netif_err(qdev, ifup, qdev->ndev,
3482 "Failed request for MSIX interrupt %d.\n",
3483 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003484 goto err_irq;
3485 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003486 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3487 "Hooked intr %d, queue type %s, with name %s.\n",
3488 i,
3489 qdev->rx_ring[i].type == DEFAULT_Q ?
3490 "DEFAULT_Q" :
3491 qdev->rx_ring[i].type == TX_Q ?
3492 "TX_Q" :
3493 qdev->rx_ring[i].type == RX_Q ?
3494 "RX_Q" : "",
3495 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003496 }
3497 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003498 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3499 "trying msi or legacy interrupts.\n");
3500 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 "%s: irq = %d.\n", __func__, pdev->irq);
3502 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503 "%s: context->name = %s.\n", __func__,
3504 intr_context->name);
3505 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3506 "%s: dev_id = 0x%p.\n", __func__,
3507 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003508 status =
3509 request_irq(pdev->irq, qlge_isr,
3510 test_bit(QL_MSI_ENABLED,
3511 &qdev->
3512 flags) ? 0 : IRQF_SHARED,
3513 intr_context->name, &qdev->rx_ring[0]);
3514 if (status)
3515 goto err_irq;
3516
Joe Perchesae9540f72010-02-09 11:49:52 +00003517 netif_err(qdev, ifup, qdev->ndev,
3518 "Hooked intr %d, queue type %s, with name %s.\n",
3519 i,
3520 qdev->rx_ring[0].type == DEFAULT_Q ?
3521 "DEFAULT_Q" :
3522 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3523 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3524 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003525 }
3526 intr_context->hooked = 1;
3527 }
3528 return status;
3529err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003530 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003531 ql_free_irq(qdev);
3532 return status;
3533}
3534
3535static int ql_start_rss(struct ql_adapter *qdev)
3536{
Ron Mercer541ae282009-10-08 09:54:37 +00003537 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3538 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3539 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3540 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3541 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3542 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003543 struct ricb *ricb = &qdev->ricb;
3544 int status = 0;
3545 int i;
3546 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3547
Ron Mercere3324712009-07-02 06:06:13 +00003548 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549
Ron Mercerb2014ff2009-08-27 11:02:09 +00003550 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003551 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003552 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3553 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003554
3555 /*
3556 * Fill out the Indirection Table.
3557 */
Ron Mercer541ae282009-10-08 09:54:37 +00003558 for (i = 0; i < 1024; i++)
3559 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003560
Ron Mercer541ae282009-10-08 09:54:37 +00003561 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3562 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563
Joe Perchesae9540f72010-02-09 11:49:52 +00003564 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565
Ron Mercere3324712009-07-02 06:06:13 +00003566 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003567 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003568 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003569 return status;
3570 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003571 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3572 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003573 return status;
3574}
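
/* A minimal sketch of how the RICB programmed above resolves a 32-bit
 * RSS hash to a completion queue: the low 10 bits (ricb->mask is 0x3ff)
 * index the 1024-entry table, whose entries were filled with
 * (i & (rss_ring_count - 1)).  The lookup happens in hardware; this
 * helper only illustrates the arithmetic.
 */
static inline u8 ql_example_rss_to_cq(struct ql_adapter *qdev, u32 hash)
{
	return (u8)((hash & 0x3ff) & (qdev->rss_ring_count - 1));
}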
3575
Ron Mercera5f59dc2009-07-02 06:06:07 +00003576static int ql_clear_routing_entries(struct ql_adapter *qdev)
3577{
3578 int i, status = 0;
3579
3580 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3581 if (status)
3582 return status;
3583 /* Clear all the entries in the routing table. */
3584 for (i = 0; i < 16; i++) {
3585 status = ql_set_routing_reg(qdev, i, 0, 0);
3586 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003587 netif_err(qdev, ifup, qdev->ndev,
3588 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003589 break;
3590 }
3591 }
3592 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3593 return status;
3594}
3595
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003596/* Initialize the frame-to-queue routing. */
3597static int ql_route_initialize(struct ql_adapter *qdev)
3598{
3599 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003600
3601 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003602 status = ql_clear_routing_entries(qdev);
3603 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003604 return status;
3605
3606 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3607 if (status)
3608 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003609
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003610 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3611 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003612 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003613 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003614 "Failed to init routing register "
3615 "for IP CSUM error packets.\n");
3616 goto exit;
3617 }
3618 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3619 RT_IDX_TU_CSUM_ERR, 1);
3620 if (status) {
3621 netif_err(qdev, ifup, qdev->ndev,
3622 "Failed to init routing register "
3623 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003624 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003625 }
3626 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3627 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003628 netif_err(qdev, ifup, qdev->ndev,
3629 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003630 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003631 }
3632 /* If we have more than one inbound queue, then turn on RSS in the
3633 * routing block.
3634 */
3635 if (qdev->rss_ring_count > 1) {
3636 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3637 RT_IDX_RSS_MATCH, 1);
3638 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003639 netif_err(qdev, ifup, qdev->ndev,
3640 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003641 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003642 }
3643 }
3644
3645 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3646 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003647 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003648 netif_err(qdev, ifup, qdev->ndev,
3649 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003650exit:
3651 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003652 return status;
3653}
3654
Ron Mercer2ee1e272009-03-03 12:10:33 +00003655int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003656{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003657 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003658
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003659 /* Check if the link is up and use that to
3660 * determine whether we are setting or clearing
3661 * the MAC address in the CAM.
3662 */
3663 set = ql_read32(qdev, STS);
3664 set &= qdev->port_link_up;
3665 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003666 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003667 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003668 return status;
3669 }
3670
3671 status = ql_route_initialize(qdev);
3672 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003673 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003674
3675 return status;
3676}
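
/* Sketch of the link test used above: the STS register ANDed with the
 * port's link-up bit decides whether the CAM entry is programmed
 * (link up) or cleared (link down).  Illustrative only.
 */
static inline int ql_example_link_is_up(struct ql_adapter *qdev)
{
	return (ql_read32(qdev, STS) & qdev->port_link_up) != 0;
}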
3677
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003678static int ql_adapter_initialize(struct ql_adapter *qdev)
3679{
3680 u32 value, mask;
3681 int i;
3682 int status = 0;
3683
3684 /*
3685 * Set up the System register to halt on errors.
3686 */
3687 value = SYS_EFE | SYS_FAE;
3688 mask = value << 16;
3689 ql_write32(qdev, SYS, mask | value);
3690
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003691 /* Set the default queue, and VLAN behavior. */
3692 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3693 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003694 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3695
3696 /* Set the MPI interrupt to enabled. */
3697 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3698
3699 /* Enable the function, set pagesize, enable error checking. */
3700 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003701 FSC_EC | FSC_VM_PAGE_4K;
3702 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003703
3704 /* Set/clear header splitting. */
3705 mask = FSC_VM_PAGESIZE_MASK |
3706 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3707 ql_write32(qdev, FSC, mask | value);
3708
Ron Mercer572c5262010-01-02 10:37:42 +00003709 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003710
Ron Mercera3b71932009-10-08 09:54:38 +00003711 /* Set RX packet routing to use port/pci function on which the
3712 * packet arrived, in addition to usual frame routing.
3713 * This is helpful on bonding where both interfaces can have
3714 * the same MAC address.
3715 */
3716 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003717 /* Reroute all packets to our Interface.
3718 * They may have been routed to MPI firmware
3719 * due to WOL.
3720 */
3721 value = ql_read32(qdev, MGMT_RCV_CFG);
3722 value &= ~MGMT_RCV_CFG_RM;
3723 mask = 0xffff0000;
3724
3725 /* Sticky reg needs clearing due to WOL. */
3726 ql_write32(qdev, MGMT_RCV_CFG, mask);
3727 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3728
3729 /* Default WOL is enabled on Mezz cards */
3730 if (qdev->pdev->subsystem_device == 0x0068 ||
3731 qdev->pdev->subsystem_device == 0x0180)
3732 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003733
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003734 /* Start up the rx queues. */
3735 for (i = 0; i < qdev->rx_ring_count; i++) {
3736 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3737 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003738 netif_err(qdev, ifup, qdev->ndev,
3739 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003740 return status;
3741 }
3742 }
3743
3744 /* If there is more than one inbound completion queue
3745 * then download a RICB to configure RSS.
3746 */
3747 if (qdev->rss_ring_count > 1) {
3748 status = ql_start_rss(qdev);
3749 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003750 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003751 return status;
3752 }
3753 }
3754
3755 /* Start up the tx queues. */
3756 for (i = 0; i < qdev->tx_ring_count; i++) {
3757 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3758 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003759 netif_err(qdev, ifup, qdev->ndev,
3760 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003761 return status;
3762 }
3763 }
3764
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003765 /* Initialize the port and set the max framesize. */
3766 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003767 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003768 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003769
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003770 /* Set up the MAC address and frame routing filter. */
3771 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003772 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003775 return status;
3776 }
3777
3778 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003779 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003780 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3781 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003782 napi_enable(&qdev->rx_ring[i].napi);
3783 }
3784
3785 return status;
3786}
3787
3788/* Issue soft reset to chip. */
3789static int ql_adapter_reset(struct ql_adapter *qdev)
3790{
3791 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003792 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003793 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003794
Ron Mercera5f59dc2009-07-02 06:06:07 +00003795 /* Clear all the entries in the routing table. */
3796 status = ql_clear_routing_entries(qdev);
3797 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003798 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003799 return status;
3800 }
3801
3802 end_jiffies = jiffies +
3803 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003804
3805 /* Stop management traffic. */
3806 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3807
3808 /* Wait for the NIC and MGMNT FIFOs to empty. */
3809 ql_wait_fifo_empty(qdev);
3810
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003811 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003812
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813 do {
3814 value = ql_read32(qdev, RST_FO);
3815 if ((value & RST_FO_FR) == 0)
3816 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003817 cpu_relax();
3818 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003819
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003820 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003821 netif_err(qdev, ifdown, qdev->ndev,
3822 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003823 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003824 }
3825
Ron Mercer84087f42009-10-08 09:54:41 +00003826 /* Resume management traffic. */
3827 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003828 return status;
3829}
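
/* The reset wait above follows a common bounded-poll pattern; here is a
 * hedged, generalized sketch (reg and bit are caller-chosen, the
 * timeout is in microseconds):
 */
static int ql_example_poll_bit_clear(struct ql_adapter *qdev, u32 reg,
				     u32 bit, unsigned long timeout_us)
{
	unsigned long end = jiffies +
		max((unsigned long)1, usecs_to_jiffies(timeout_us));

	do {
		if (!(ql_read32(qdev, reg) & bit))
			return 0;
		cpu_relax();
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}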
3830
3831static void ql_display_dev_info(struct net_device *ndev)
3832{
3833 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3834
Joe Perchesae9540f72010-02-09 11:49:52 +00003835 netif_info(qdev, probe, qdev->ndev,
3836 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3837 "XG Roll = %d, XG Rev = %d.\n",
3838 qdev->func,
3839 qdev->port,
3840 qdev->chip_rev_id & 0x0000000f,
3841 qdev->chip_rev_id >> 4 & 0x0000000f,
3842 qdev->chip_rev_id >> 8 & 0x0000000f,
3843 qdev->chip_rev_id >> 12 & 0x0000000f);
3844 netif_info(qdev, probe, qdev->ndev,
3845 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003846}
3847
stephen hemmingerac409212010-10-21 07:50:54 +00003848static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003849{
3850 int status = 0;
3851 u32 wol = MB_WOL_DISABLE;
3852
3853 /* The CAM is still intact after a reset, but if we
3854 * are doing WOL, then we may need to program the
3855 * routing regs. We would also need to issue the mailbox
3856 * commands to instruct the MPI what to do per the ethtool
3857 * settings.
3858 */
3859
3860 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3861 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003862 netif_err(qdev, ifdown, qdev->ndev,
3863 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3864 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003865 return -EINVAL;
3866 }
3867
3868 if (qdev->wol & WAKE_MAGIC) {
3869 status = ql_mb_wol_set_magic(qdev, 1);
3870 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003871 netif_err(qdev, ifdown, qdev->ndev,
3872 "Failed to set magic packet on %s.\n",
3873 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003874 return status;
3875 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003876 netif_info(qdev, drv, qdev->ndev,
3877 "Enabled magic packet successfully on %s.\n",
3878 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003879
3880 wol |= MB_WOL_MAGIC_PKT;
3881 }
3882
3883 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003884 wol |= MB_WOL_MODE_ON;
3885 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003886 netif_err(qdev, drv, qdev->ndev,
3887 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003888 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003889 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003890 }
3891
3892 return status;
3893}
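
/* Compact sketch of the capability check ql_wol() performs above: only
 * magic-packet wake is supported, so any other WAKE_* bit is rejected.
 * WAKE_MAGIC comes from <linux/ethtool.h>; the helper is an
 * illustration, not driver code.
 */
static inline bool ql_example_wol_supported(u32 wolopts)
{
	return !(wolopts & ~(u32)WAKE_MAGIC);
}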
3894
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003895static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003896{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003897
Ron Mercer6497b602009-02-12 16:37:13 -08003898 /* Don't kill the reset worker thread if we
3899 * are in the process of recovery.
3900 */
3901 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3902 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003903 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3904 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003905 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003906 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003907 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003908}
3909
3910static int ql_adapter_down(struct ql_adapter *qdev)
3911{
3912 int i, status = 0;
3913
3914 ql_link_off(qdev);
3915
3916 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003917
Ron Mercer39aa8162009-08-27 11:02:11 +00003918 for (i = 0; i < qdev->rss_ring_count; i++)
3919 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003920
3921 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3922
3923 ql_disable_interrupts(qdev);
3924
3925 ql_tx_ring_clean(qdev);
3926
Ron Mercer6b318cb2009-03-09 10:59:26 +00003927 /* Call netif_napi_del() from common point.
3928 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003929 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003930 netif_napi_del(&qdev->rx_ring[i].napi);
3931
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003932 status = ql_adapter_reset(qdev);
3933 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003934 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3935 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003936 ql_free_rx_buffers(qdev);
3937
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003938 return status;
3939}
3940
3941static int ql_adapter_up(struct ql_adapter *qdev)
3942{
3943 int err = 0;
3944
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003945 err = ql_adapter_initialize(qdev);
3946 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003947 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003948 goto err_init;
3949 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003950 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003951 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003952 /* If the port is initialized and the
3953 * link is up then turn on the carrier.
3954 */
3955 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3956 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003957 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003958 /* Restore rx mode. */
3959 clear_bit(QL_ALLMULTI, &qdev->flags);
3960 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3961 qlge_set_multicast_list(qdev->ndev);
3962
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003963 ql_enable_interrupts(qdev);
3964 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003965 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003966
3967 return 0;
3968err_init:
3969 ql_adapter_reset(qdev);
3970 return err;
3971}
3972
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003973static void ql_release_adapter_resources(struct ql_adapter *qdev)
3974{
3975 ql_free_mem_resources(qdev);
3976 ql_free_irq(qdev);
3977}
3978
3979static int ql_get_adapter_resources(struct ql_adapter *qdev)
3980{
3981 int status = 0;
3982
3983 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003984 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003985 return -ENOMEM;
3986 }
3987 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003988 return status;
3989}
3990
3991static int qlge_close(struct net_device *ndev)
3992{
3993 struct ql_adapter *qdev = netdev_priv(ndev);
3994
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003995 /* If we hit the pci_channel_io_perm_failure
3996 * condition, then we already
3997 * brought the adapter down.
3998 */
3999 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004000 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004001 clear_bit(QL_EEH_FATAL, &qdev->flags);
4002 return 0;
4003 }
4004
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004005 /*
4006 * Wait for device to recover from a reset.
4007 * (Rarely happens, but possible.)
4008 */
4009 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4010 msleep(1);
4011 ql_adapter_down(qdev);
4012 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004013 return 0;
4014}
4015
4016static int ql_configure_rings(struct ql_adapter *qdev)
4017{
4018 int i;
4019 struct rx_ring *rx_ring;
4020 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004021 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004022 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4023 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4024
4025 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004026
Ron Mercera4ab6132009-08-27 11:02:10 +00004027 /* In a perfect world we have one RSS ring for each CPU
4028 * and each has its own vector. To do that we ask for
4029 * cpu_cnt vectors. ql_enable_msix() will adjust the
4030 * vector count to what we actually get. We then
4031 * allocate an RSS ring for each.
4032 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004033 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004034 qdev->intr_count = cpu_cnt;
4035 ql_enable_msix(qdev);
4036 /* Adjust the RSS ring count to the actual vector count. */
4037 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004038 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004039 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004040
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004041 for (i = 0; i < qdev->tx_ring_count; i++) {
4042 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004043 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004044 tx_ring->qdev = qdev;
4045 tx_ring->wq_id = i;
4046 tx_ring->wq_len = qdev->tx_ring_size;
4047 tx_ring->wq_size =
4048 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4049
4050 /*
4051 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004052 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004053 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004054 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004055 }
4056
4057 for (i = 0; i < qdev->rx_ring_count; i++) {
4058 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004059 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004060 rx_ring->qdev = qdev;
4061 rx_ring->cq_id = i;
4062 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004063 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004064 /*
4065 * Inbound (RSS) queues.
4066 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004067 rx_ring->cq_len = qdev->rx_ring_size;
4068 rx_ring->cq_size =
4069 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4070 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4071 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004072 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004073 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004074 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4075 "lbq_buf_size %d, order = %d\n",
4076 rx_ring->lbq_buf_size,
4077 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004078 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4079 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004080 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004081 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004082 rx_ring->type = RX_Q;
4083 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004084 /*
4085 * Outbound queue handles outbound completions only.
4086 */
4087 /* outbound cq is same size as tx_ring it services. */
4088 rx_ring->cq_len = qdev->tx_ring_size;
4089 rx_ring->cq_size =
4090 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4091 rx_ring->lbq_len = 0;
4092 rx_ring->lbq_size = 0;
4093 rx_ring->lbq_buf_size = 0;
4094 rx_ring->sbq_len = 0;
4095 rx_ring->sbq_size = 0;
4096 rx_ring->sbq_buf_size = 0;
4097 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004098 }
4099 }
4100 return 0;
4101}
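
/* Worked example of the ring accounting in ql_configure_rings(),
 * assuming 4 online CPUs and a full MSI-X grant: intr_count = 4, so
 * rss_ring_count = 4, tx_ring_count = 4, and rx_ring_count = 8, with
 * TX completion ring i using cq_id = rss_ring_count + i = 4 + i.
 */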
4102
4103static int qlge_open(struct net_device *ndev)
4104{
4105 int err = 0;
4106 struct ql_adapter *qdev = netdev_priv(ndev);
4107
Ron Mercer74e12432009-11-11 12:54:04 +00004108 err = ql_adapter_reset(qdev);
4109 if (err)
4110 return err;
4111
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004112 err = ql_configure_rings(qdev);
4113 if (err)
4114 return err;
4115
4116 err = ql_get_adapter_resources(qdev);
4117 if (err)
4118 goto error_up;
4119
4120 err = ql_adapter_up(qdev);
4121 if (err)
4122 goto error_up;
4123
4124 return err;
4125
4126error_up:
4127 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004128 return err;
4129}
4130
Ron Mercer7c734352009-10-19 03:32:19 +00004131static int ql_change_rx_buffers(struct ql_adapter *qdev)
4132{
4133 struct rx_ring *rx_ring;
4134 int i, status;
4135 u32 lbq_buf_len;
4136
4137 /* Wait for an outstanding reset to complete. */
4138 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4139 int i = 3;
4140 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004141 netif_err(qdev, ifup, qdev->ndev,
4142 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004143 ssleep(1);
4144 }
4145
4146 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004147 netif_err(qdev, ifup, qdev->ndev,
4148 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004149 return -ETIMEDOUT;
4150 }
4151 }
4152
4153 status = ql_adapter_down(qdev);
4154 if (status)
4155 goto error;
4156
4157 /* Get the new rx buffer size. */
4158 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4159 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4160 qdev->lbq_buf_order = get_order(lbq_buf_len);
4161
4162 for (i = 0; i < qdev->rss_ring_count; i++) {
4163 rx_ring = &qdev->rx_ring[i];
4164 /* Set the new size. */
4165 rx_ring->lbq_buf_size = lbq_buf_len;
4166 }
4167
4168 status = ql_adapter_up(qdev);
4169 if (status)
4170 goto error;
4171
4172 return status;
4173error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004174 netif_alert(qdev, ifup, qdev->ndev,
4175 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004176 set_bit(QL_ADAPTER_UP, &qdev->flags);
4177 dev_close(qdev->ndev);
4178 return status;
4179}
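
/* Sketch of the buffer sizing rule applied above and in
 * ql_configure_rings(): a jumbo MTU selects the larger receive buffer,
 * and the page-allocation order follows from that length.
 */
static inline unsigned int ql_example_lbq_buf_len(int mtu)
{
	return (mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
}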
4180
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004181static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4182{
4183 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004184 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004185
4186 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004187 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004188 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004189 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004190 } else
4191 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004192
4193 queue_delayed_work(qdev->workqueue,
4194 &qdev->mpi_port_cfg_work, 3*HZ);
4195
Breno Leitao746079d2010-02-04 10:11:19 +00004196 ndev->mtu = new_mtu;
4197
Ron Mercer7c734352009-10-19 03:32:19 +00004198 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004199 return 0;
4200 }
4201
Ron Mercer7c734352009-10-19 03:32:19 +00004202 status = ql_change_rx_buffers(qdev);
4203 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004204 netif_err(qdev, ifup, qdev->ndev,
4205 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004206 }
4207
4208 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004209}
4210
4211static struct net_device_stats *qlge_get_stats(struct net_device
4212 *ndev)
4213{
Ron Mercer885ee392009-11-03 13:49:31 +00004214 struct ql_adapter *qdev = netdev_priv(ndev);
4215 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4216 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4217 unsigned long pkts, mcast, dropped, errors, bytes;
4218 int i;
4219
4220 /* Get RX stats. */
4221 pkts = mcast = dropped = errors = bytes = 0;
4222 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4223 pkts += rx_ring->rx_packets;
4224 bytes += rx_ring->rx_bytes;
4225 dropped += rx_ring->rx_dropped;
4226 errors += rx_ring->rx_errors;
4227 mcast += rx_ring->rx_multicast;
4228 }
4229 ndev->stats.rx_packets = pkts;
4230 ndev->stats.rx_bytes = bytes;
4231 ndev->stats.rx_dropped = dropped;
4232 ndev->stats.rx_errors = errors;
4233 ndev->stats.multicast = mcast;
4234
4235 /* Get TX stats. */
4236 pkts = errors = bytes = 0;
4237 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4238 pkts += tx_ring->tx_packets;
4239 bytes += tx_ring->tx_bytes;
4240 errors += tx_ring->tx_errors;
4241 }
4242 ndev->stats.tx_packets = pkts;
4243 ndev->stats.tx_bytes = bytes;
4244 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004245 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004246}
4247
stephen hemmingerac409212010-10-21 07:50:54 +00004248static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004249{
4250 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004251 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004252 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004253
Ron Mercercc288f52009-02-23 10:42:14 +00004254 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4255 if (status)
4256 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004257 /*
4258 * Set or clear promiscuous mode if a
4259 * transition is taking place.
4260 */
4261 if (ndev->flags & IFF_PROMISC) {
4262 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4263 if (ql_set_routing_reg
4264 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004265 netif_err(qdev, hw, qdev->ndev,
4266 "Failed to set promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004267 } else {
4268 set_bit(QL_PROMISCUOUS, &qdev->flags);
4269 }
4270 }
4271 } else {
4272 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4273 if (ql_set_routing_reg
4274 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004275 netif_err(qdev, hw, qdev->ndev,
4276 "Failed to clear promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004277 } else {
4278 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4279 }
4280 }
4281 }
4282
4283 /*
4284 * Set or clear all multicast mode if a
4285 * transition is taking place.
4286 */
4287 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004288 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004289 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4290 if (ql_set_routing_reg
4291 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004292 netif_err(qdev, hw, qdev->ndev,
4293 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004294 } else {
4295 set_bit(QL_ALLMULTI, &qdev->flags);
4296 }
4297 }
4298 } else {
4299 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4300 if (ql_set_routing_reg
4301 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004302 netif_err(qdev, hw, qdev->ndev,
4303 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004304 } else {
4305 clear_bit(QL_ALLMULTI, &qdev->flags);
4306 }
4307 }
4308 }
4309
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004310 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004311 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4312 if (status)
4313 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004314 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004315 netdev_for_each_mc_addr(ha, ndev) {
4316 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004317 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004318 netif_err(qdev, hw, qdev->ndev,
4319 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004320 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004321 goto exit;
4322 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004323 i++;
4324 }
Ron Mercercc288f52009-02-23 10:42:14 +00004325 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004326 if (ql_set_routing_reg
4327 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004328 netif_err(qdev, hw, qdev->ndev,
4329 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004330 } else {
4331 set_bit(QL_ALLMULTI, &qdev->flags);
4332 }
4333 }
4334exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004335 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004336}
4337
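/* Set a new station address: keep a local copy for use across
 * resets, then load it into the CAM under the MAC address semaphore.
 */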
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

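/* Recover from an ASIC error by cycling the adapter down and back
 * up under RTNL.  The RX mode flags are cleared first so that
 * qlge_set_multicast_list() reprograms the filters from scratch.
 */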
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

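/* Per-chip callouts; the 8012 and 8000 devices appear to differ in
 * flash layout and port initialization sequence.
 */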
static struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

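/* Derive the per-function parameters (port index, semaphore masks,
 * mailbox addresses, nic_ops) from the STS register and the PCI
 * device ID.
 */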
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

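/* Undo what ql_init_device() set up: workqueue, register mappings,
 * coredump buffer and PCI regions.
 */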
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

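/* One-time PCI and software setup performed at probe time, before
 * the net_device is registered.
 */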
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		/* Regions were not acquired; only disable the device. */
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

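/* Periodically read the STS register so a dead PCI bus is noticed
 * (and EEH recovery triggered) even while the interface is idle.
 */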
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

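/* PCI probe entry point: allocate the multiqueue net_device, do the
 * one-time adapter setup, register with the stack and start the EEH
 * watchdog timer.
 */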
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

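/* Hooks used by the ethtool loopback self-test; they simply expose
 * the normal send and RX-clean paths.
 */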
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

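/* PCI remove: stop the watchdog timer and all workers before
 * unregistering and releasing the device.
 */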
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

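/* Final step of EEH recovery: reopen the interface if it was
 * running, restart the watchdog timer and reattach the device.
 */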
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

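/* Quiesce the adapter, arm wake-on-LAN and put the device into the
 * requested low-power state.  Also called directly for shutdown.
 */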
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);