/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
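/* Returns 0 if the semaphore was acquired, non-zero if it could not be
 * taken (already held by another function, or an invalid mask).
 */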
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
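/* Polls the register every UDELAY_DELAY microseconds, for at most
 * UDELAY_COUNT iterations, before giving up with -ETIMEDOUT.
 */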
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
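/* A set CFG_LE bit indicates a load error and aborts the wait with -EIO. */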
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
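/* An entry is programmed by writing the destination/type/slot to RT_IDX
 * and the match mask to RT_DATA; the RT_IDX_E bit enables the entry.
 */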
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
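/* Returns the contents of the STS register when the interrupt is
 * (re)enabled here, otherwise 0.
 */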
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

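/* Validate a flash image: the first four bytes must match the expected
 * signature string, and the 16-bit words of the image must sum to zero.
 */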
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

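/* Large receive buffers are carved as chunks out of a single compound
 * "master" page.  Each chunk holds its own reference on the page, and the
 * page's DMA mapping is torn down only when its last chunk is consumed
 * (see ql_get_curr_lchunk()).
 */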
Ron Mercer7c734352009-10-19 03:32:19 +00001103static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1104 struct bq_desc *lbq_desc)
1105{
1106 if (!rx_ring->pg_chunk.page) {
1107 u64 map;
1108 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1109 GFP_ATOMIC,
1110 qdev->lbq_buf_order);
1111 if (unlikely(!rx_ring->pg_chunk.page)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001112 netif_err(qdev, drv, qdev->ndev,
1113 "page allocation failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00001114 return -ENOMEM;
1115 }
1116 rx_ring->pg_chunk.offset = 0;
1117 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1118 0, ql_lbq_block_size(qdev),
1119 PCI_DMA_FROMDEVICE);
1120 if (pci_dma_mapping_error(qdev->pdev, map)) {
1121 __free_pages(rx_ring->pg_chunk.page,
1122 qdev->lbq_buf_order);
Joe Perchesae9540f72010-02-09 11:49:52 +00001123 netif_err(qdev, drv, qdev->ndev,
1124 "PCI mapping failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00001125 return -ENOMEM;
1126 }
1127 rx_ring->pg_chunk.map = map;
1128 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1129 }
1130
1131 /* Copy the current master pg_chunk info
1132 * to the current descriptor.
1133 */
1134 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1135
1136 /* Adjust the master page chunk for next
1137 * buffer get.
1138 */
1139 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1140 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1141 rx_ring->pg_chunk.page = NULL;
1142 lbq_desc->p.pg_chunk.last_flag = 1;
1143 } else {
1144 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1145 get_page(rx_ring->pg_chunk.page);
1146 lbq_desc->p.pg_chunk.last_flag = 0;
1147 }
1148 return 0;
1149}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001150/* Process (refill) a large buffer queue. */
1151static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1152{
Ron Mercer49f21862009-02-23 10:42:16 +00001153 u32 clean_idx = rx_ring->lbq_clean_idx;
1154 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001155 struct bq_desc *lbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001156 u64 map;
1157 int i;
1158
Ron Mercer7c734352009-10-19 03:32:19 +00001159 while (rx_ring->lbq_free_cnt > 32) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001160 for (i = 0; i < 16; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001161 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1162 "lbq: try cleaning clean_idx = %d.\n",
1163 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001164 lbq_desc = &rx_ring->lbq[clean_idx];
Ron Mercer7c734352009-10-19 03:32:19 +00001165 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001166 netif_err(qdev, ifup, qdev->ndev,
1167 "Could not get a page chunk.\n");
1168 return;
1169 }
Ron Mercer7c734352009-10-19 03:32:19 +00001170
1171 map = lbq_desc->p.pg_chunk.map +
1172 lbq_desc->p.pg_chunk.offset;
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001173 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1174 dma_unmap_len_set(lbq_desc, maplen,
Ron Mercer7c734352009-10-19 03:32:19 +00001175 rx_ring->lbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001176 *lbq_desc->addr = cpu_to_le64(map);
Ron Mercer7c734352009-10-19 03:32:19 +00001177
1178 pci_dma_sync_single_for_device(qdev->pdev, map,
1179 rx_ring->lbq_buf_size,
1180 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001181 clean_idx++;
1182 if (clean_idx == rx_ring->lbq_len)
1183 clean_idx = 0;
1184 }
1185
1186 rx_ring->lbq_clean_idx = clean_idx;
1187 rx_ring->lbq_prod_idx += 16;
1188 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1189 rx_ring->lbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001190 rx_ring->lbq_free_cnt -= 16;
1191 }
1192
1193 if (start_idx != clean_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001194 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1195 "lbq: updating prod idx = %d.\n",
1196 rx_ring->lbq_prod_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001197 ql_write_db_reg(rx_ring->lbq_prod_idx,
1198 rx_ring->lbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001199 }
1200}
1201
1202/* Process (refill) a small buffer queue. */
1203static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1204{
Ron Mercer49f21862009-02-23 10:42:16 +00001205 u32 clean_idx = rx_ring->sbq_clean_idx;
1206 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001207 struct bq_desc *sbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001208 u64 map;
1209 int i;
1210
1211 while (rx_ring->sbq_free_cnt > 16) {
1212 for (i = 0; i < 16; i++) {
1213 sbq_desc = &rx_ring->sbq[clean_idx];
Joe Perchesae9540f72010-02-09 11:49:52 +00001214 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1215 "sbq: try cleaning clean_idx = %d.\n",
1216 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001217 if (sbq_desc->p.skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001218 netif_printk(qdev, rx_status, KERN_DEBUG,
1219 qdev->ndev,
1220 "sbq: getting new skb for index %d.\n",
1221 sbq_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001222 sbq_desc->p.skb =
1223 netdev_alloc_skb(qdev->ndev,
Ron Mercer52e55f32009-10-10 09:35:07 +00001224 SMALL_BUFFER_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001225 if (sbq_desc->p.skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001226 netif_err(qdev, probe, qdev->ndev,
1227 "Couldn't get an skb.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001228 rx_ring->sbq_clean_idx = clean_idx;
1229 return;
1230 }
1231 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1232 map = pci_map_single(qdev->pdev,
1233 sbq_desc->p.skb->data,
Ron Mercer52e55f32009-10-10 09:35:07 +00001234 rx_ring->sbq_buf_size,
1235 PCI_DMA_FROMDEVICE);
Ron Mercerc907a352009-01-04 17:06:46 -08001236 if (pci_dma_mapping_error(qdev->pdev, map)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001237 netif_err(qdev, ifup, qdev->ndev,
1238 "PCI mapping failed.\n");
Ron Mercerc907a352009-01-04 17:06:46 -08001239 rx_ring->sbq_clean_idx = clean_idx;
Ron Mercer06a3d512009-02-12 16:37:48 -08001240 dev_kfree_skb_any(sbq_desc->p.skb);
1241 sbq_desc->p.skb = NULL;
Ron Mercerc907a352009-01-04 17:06:46 -08001242 return;
1243 }
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001244 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1245 dma_unmap_len_set(sbq_desc, maplen,
Ron Mercer52e55f32009-10-10 09:35:07 +00001246 rx_ring->sbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001247 *sbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001248 }
1249
1250 clean_idx++;
1251 if (clean_idx == rx_ring->sbq_len)
1252 clean_idx = 0;
1253 }
1254 rx_ring->sbq_clean_idx = clean_idx;
1255 rx_ring->sbq_prod_idx += 16;
1256 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1257 rx_ring->sbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001258 rx_ring->sbq_free_cnt -= 16;
1259 }
1260
1261 if (start_idx != clean_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001262 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1263 "sbq: updating prod idx = %d.\n",
1264 rx_ring->sbq_prod_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001265 ql_write_db_reg(rx_ring->sbq_prod_idx,
1266 rx_ring->sbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001267 }
1268}
1269
1270static void ql_update_buffer_queues(struct ql_adapter *qdev,
1271 struct rx_ring *rx_ring)
1272{
1273 ql_update_sbq(qdev, rx_ring);
1274 ql_update_lbq(qdev, rx_ring);
1275}
1276
1277/* Unmaps tx buffers. Can be called from send() if a pci mapping
1278 * fails at some stage, or from the interrupt when a tx completes.
1279 */
1280static void ql_unmap_send(struct ql_adapter *qdev,
1281 struct tx_ring_desc *tx_ring_desc, int mapped)
1282{
1283 int i;
1284 for (i = 0; i < mapped; i++) {
1285 if (i == 0 || (i == 7 && mapped > 7)) {
1286 /*
1287 * Unmap the skb->data area, or the
1288 * external sglist (AKA the Outbound
1289 * Address List (OAL)).
1290 * If its the zeroeth element, then it's
1291 * the skb->data area. If it's the 7th
1292 * element and there is more than 6 frags,
1293 * then its an OAL.
1294 */
1295 if (i == 7) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001296 netif_printk(qdev, tx_done, KERN_DEBUG,
1297 qdev->ndev,
1298 "unmapping OAL area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001299 }
1300 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001301 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001302 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001303 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001304 maplen),
1305 PCI_DMA_TODEVICE);
1306 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1308 "unmapping frag %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001309 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001310 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001311 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001312 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001313 maplen), PCI_DMA_TODEVICE);
1314 }
1315 }
1316
1317}
1318
1319/* Map the buffers for this transmit. This will return
1320 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1321 */
1322static int ql_map_send(struct ql_adapter *qdev,
1323 struct ob_mac_iocb_req *mac_iocb_ptr,
1324 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1325{
1326 int len = skb_headlen(skb);
1327 dma_addr_t map;
1328 int frag_idx, err, map_idx = 0;
1329 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1330 int frag_cnt = skb_shinfo(skb)->nr_frags;
1331
1332 if (frag_cnt) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001333 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1334 "frag_cnt = %d.\n", frag_cnt);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001335 }
1336 /*
1337 * Map the skb buffer first.
1338 */
1339 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1340
1341 err = pci_dma_mapping_error(qdev->pdev, map);
1342 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001343 netif_err(qdev, tx_queued, qdev->ndev,
1344 "PCI mapping failed with error: %d\n", err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001345
1346 return NETDEV_TX_BUSY;
1347 }
1348
1349 tbd->len = cpu_to_le32(len);
1350 tbd->addr = cpu_to_le64(map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001351 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001353 map_idx++;
1354
1355 /*
1356 * This loop fills the remainder of the 8 address descriptors
1357 * in the IOCB. If there are more than 7 fragments, then the
1358 * eighth address desc will point to an external list (OAL).
1359 * When this happens, the remainder of the frags will be stored
1360 * in this list.
1361 */
1362 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1363 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1364 tbd++;
1365 if (frag_idx == 6 && frag_cnt > 7) {
1366 /* Let's tack on an sglist.
1367 * Our control block will now
1368 * look like this:
1369 * iocb->seg[0] = skb->data
1370 * iocb->seg[1] = frag[0]
1371 * iocb->seg[2] = frag[1]
1372 * iocb->seg[3] = frag[2]
1373 * iocb->seg[4] = frag[3]
1374 * iocb->seg[5] = frag[4]
1375 * iocb->seg[6] = frag[5]
1376 * iocb->seg[7] = ptr to OAL (external sglist)
1377 * oal->seg[0] = frag[6]
1378 * oal->seg[1] = frag[7]
1379 * oal->seg[2] = frag[8]
1380 * oal->seg[3] = frag[9]
1381 * oal->seg[4] = frag[10]
1382 * etc...
1383 */
1384 /* Tack on the OAL in the eighth segment of IOCB. */
1385 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1386 sizeof(struct oal),
1387 PCI_DMA_TODEVICE);
1388 err = pci_dma_mapping_error(qdev->pdev, map);
1389 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001390 netif_err(qdev, tx_queued, qdev->ndev,
1391 "PCI mapping outbound address list with error: %d\n",
1392 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001393 goto map_error;
1394 }
1395
1396 tbd->addr = cpu_to_le64(map);
1397 /*
1398 * The length is the number of fragments
1399 * that remain to be mapped times the length
1400 * of our sglist (OAL).
1401 */
1402 tbd->len =
1403 cpu_to_le32((sizeof(struct tx_buf_desc) *
1404 (frag_cnt - frag_idx)) | TX_DESC_C);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001405 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001406 map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001407 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001408 sizeof(struct oal));
1409 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1410 map_idx++;
1411 }
1412
1413 map =
1414 pci_map_page(qdev->pdev, frag->page,
1415 frag->page_offset, frag->size,
1416 PCI_DMA_TODEVICE);
1417
1418 err = pci_dma_mapping_error(qdev->pdev, map);
1419 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001420 netif_err(qdev, tx_queued, qdev->ndev,
1421 "PCI mapping frags failed with error: %d.\n",
1422 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001423 goto map_error;
1424 }
1425
1426 tbd->addr = cpu_to_le64(map);
1427 tbd->len = cpu_to_le32(frag->size);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001428 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1429 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001430 frag->size);
1431
1432 }
1433 /* Save the number of segments we've mapped. */
1434 tx_ring_desc->map_cnt = map_idx;
1435 /* Terminate the last segment. */
1436 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1437 return NETDEV_TX_OK;
1438
1439map_error:
1440 /*
1441 * If the first frag mapping failed, then i will be zero.
1442 * This causes the unmap of the skb->data area. Otherwise
1443 * we pass in the number of frags that mapped successfully
1444 * so they can be umapped.
1445 */
1446 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1447 return NETDEV_TX_BUSY;
1448}
1449
Ron Mercer4f848c02010-01-02 10:37:43 +00001450/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001451static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1452 struct rx_ring *rx_ring,
1453 struct ib_mac_iocb_rsp *ib_mac_rsp,
1454 u32 length,
1455 u16 vlan_id)
1456{
1457 struct sk_buff *skb;
1458 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1459 struct skb_frag_struct *rx_frag;
1460 int nr_frags;
1461 struct napi_struct *napi = &rx_ring->napi;
1462
1463 napi->dev = qdev->ndev;
1464
1465 skb = napi_get_frags(napi);
1466 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001467 netif_err(qdev, drv, qdev->ndev,
1468 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001469 rx_ring->rx_dropped++;
1470 put_page(lbq_desc->p.pg_chunk.page);
1471 return;
1472 }
1473 prefetch(lbq_desc->p.pg_chunk.va);
1474 rx_frag = skb_shinfo(skb)->frags;
1475 nr_frags = skb_shinfo(skb)->nr_frags;
1476 rx_frag += nr_frags;
1477 rx_frag->page = lbq_desc->p.pg_chunk.page;
1478 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1479 rx_frag->size = length;
1480
1481 skb->len += length;
1482 skb->data_len += length;
1483 skb->truesize += length;
1484 skb_shinfo(skb)->nr_frags++;
1485
1486 rx_ring->rx_packets++;
1487 rx_ring->rx_bytes += length;
1488 skb->ip_summed = CHECKSUM_UNNECESSARY;
1489 skb_record_rx_queue(skb, rx_ring->cq_id);
1490 if (qdev->vlgrp && (vlan_id != 0xffff))
1491 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1492 else
1493 napi_gro_frags(napi);
1494}
1495
1496/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001497static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1498 struct rx_ring *rx_ring,
1499 struct ib_mac_iocb_rsp *ib_mac_rsp,
1500 u32 length,
1501 u16 vlan_id)
1502{
1503 struct net_device *ndev = qdev->ndev;
1504 struct sk_buff *skb = NULL;
1505 void *addr;
1506 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1507 struct napi_struct *napi = &rx_ring->napi;
1508
1509 skb = netdev_alloc_skb(ndev, length);
1510 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001511 netif_err(qdev, drv, qdev->ndev,
1512 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001513 rx_ring->rx_dropped++;
1514 put_page(lbq_desc->p.pg_chunk.page);
1515 return;
1516 }
1517
1518 addr = lbq_desc->p.pg_chunk.va;
1519 prefetch(addr);
1520
1521
1522 /* Frame error, so drop the packet. */
1523 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001524 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001525 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001526 rx_ring->rx_errors++;
1527 goto err_out;
1528 }
1529
1530 /* The max framesize filter on this chip is set higher than
1531 * MTU since FCoE uses 2k frames.
1532 */
1533 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001534 netif_err(qdev, drv, qdev->ndev,
1535 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001536 rx_ring->rx_dropped++;
1537 goto err_out;
1538 }
1539 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001540 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1542 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001543 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1544 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1545 length-ETH_HLEN);
1546 skb->len += length-ETH_HLEN;
1547 skb->data_len += length-ETH_HLEN;
1548 skb->truesize += length-ETH_HLEN;
1549
1550 rx_ring->rx_packets++;
1551 rx_ring->rx_bytes += skb->len;
1552 skb->protocol = eth_type_trans(skb, ndev);
1553 skb->ip_summed = CHECKSUM_NONE;
1554
1555 if (qdev->rx_csum &&
1556 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1557 /* TCP frame. */
1558 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1563 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1564 /* Unfragmented ipv4 UDP frame. */
1565 struct iphdr *iph = (struct iphdr *) skb->data;
1566 if (!(iph->frag_off &
1567 cpu_to_be16(IP_MF|IP_OFFSET))) {
1568 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001569 netif_printk(qdev, rx_status, KERN_DEBUG,
1570 qdev->ndev,
 1571					     "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001572 }
1573 }
1574 }
1575
1576 skb_record_rx_queue(skb, rx_ring->cq_id);
1577 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1578 if (qdev->vlgrp && (vlan_id != 0xffff))
1579 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1580 else
1581 napi_gro_receive(napi, skb);
1582 } else {
1583 if (qdev->vlgrp && (vlan_id != 0xffff))
1584 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1585 else
1586 netif_receive_skb(skb);
1587 }
1588 return;
1589err_out:
1590 dev_kfree_skb_any(skb);
1591 put_page(lbq_desc->p.pg_chunk.page);
1592}
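
/*
 * A minimal sketch of the "unfragmented IPv4" test used above before
 * trusting the hardware UDP checksum; the helper name is hypothetical.
 * A datagram with IP_MF set or a non-zero fragment offset is fragmented,
 * and the hardware cannot validate the UDP checksum from one fragment.
 */
static inline int example_ipv4_unfragmented(const struct iphdr *iph)
{
	return !(iph->frag_off & cpu_to_be16(IP_MF | IP_OFFSET));
}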
1593
1594/* Process an inbound completion from an rx ring. */
1595static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1596 struct rx_ring *rx_ring,
1597 struct ib_mac_iocb_rsp *ib_mac_rsp,
1598 u32 length,
1599 u16 vlan_id)
1600{
1601 struct net_device *ndev = qdev->ndev;
1602 struct sk_buff *skb = NULL;
1603 struct sk_buff *new_skb = NULL;
1604 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1605
1606 skb = sbq_desc->p.skb;
1607 /* Allocate new_skb and copy */
1608 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1609 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001610 netif_err(qdev, probe, qdev->ndev,
1611 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001612 rx_ring->rx_dropped++;
1613 return;
1614 }
1615 skb_reserve(new_skb, NET_IP_ALIGN);
1616 memcpy(skb_put(new_skb, length), skb->data, length);
1617 skb = new_skb;
1618
1619 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001621 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001622 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001623 dev_kfree_skb_any(skb);
1624 rx_ring->rx_errors++;
1625 return;
1626 }
1627
1628 /* loopback self test for ethtool */
1629 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1630 ql_check_lb_frame(qdev, skb);
1631 dev_kfree_skb_any(skb);
1632 return;
1633 }
1634
1635 /* The max framesize filter on this chip is set higher than
1636 * MTU since FCoE uses 2k frames.
1637 */
1638 if (skb->len > ndev->mtu + ETH_HLEN) {
1639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_dropped++;
1641 return;
1642 }
1643
1644 prefetch(skb->data);
1645 skb->dev = ndev;
1646 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001647 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1648 "%s Multicast.\n",
1649 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1650 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1651 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1652 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1653 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1654 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001655 }
1656 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001657 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001659
1660 rx_ring->rx_packets++;
1661 rx_ring->rx_bytes += skb->len;
1662 skb->protocol = eth_type_trans(skb, ndev);
1663 skb->ip_summed = CHECKSUM_NONE;
1664
1665 /* If rx checksum is on, and there are no
1666 * csum or frame errors.
1667 */
1668 if (qdev->rx_csum &&
1669 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1670 /* TCP frame. */
1671 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001672 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001674 skb->ip_summed = CHECKSUM_UNNECESSARY;
1675 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1676 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1677 /* Unfragmented ipv4 UDP frame. */
1678 struct iphdr *iph = (struct iphdr *) skb->data;
1679 if (!(iph->frag_off &
1680 cpu_to_be16(IP_MF|IP_OFFSET))) {
1681 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001682 netif_printk(qdev, rx_status, KERN_DEBUG,
1683 qdev->ndev,
 1684					     "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001685 }
1686 }
1687 }
1688
1689 skb_record_rx_queue(skb, rx_ring->cq_id);
1690 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1691 if (qdev->vlgrp && (vlan_id != 0xffff))
1692 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1693 vlan_id, skb);
1694 else
1695 napi_gro_receive(&rx_ring->napi, skb);
1696 } else {
1697 if (qdev->vlgrp && (vlan_id != 0xffff))
1698 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1699 else
1700 netif_receive_skb(skb);
1701 }
1702}
1703
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001704static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001705{
1706 void *temp_addr = skb->data;
1707
1708 /* Undo the skb_reserve(skb,32) we did before
1709 * giving to hardware, and realign data on
1710 * a 2-byte boundary.
1711 */
1712 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1713 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1714 skb_copy_to_linear_data(skb, temp_addr,
1715 (unsigned int)len);
1716}
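
/*
 * Worked example for the realignment above, assuming QLGE_SB_PAD is the 32
 * bytes mentioned in the comment and NET_IP_ALIGN is the typical value of 2
 * (check qlge.h and the architecture for the real constants): data and tail
 * move back by 32 - 2 = 30 bytes, leaving 2 bytes of headroom, so after the
 * 14-byte Ethernet header the IP header starts at offset 16 and is 4-byte
 * aligned.
 */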
1717
1718/*
1719 * This function builds an skb for the given inbound
1720 * completion. It will be rewritten for readability in the near
 1721 * completion. It will be rewritten for readability in the near
 1722 * future, but for now it works well.
1722 */
1723static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1724 struct rx_ring *rx_ring,
1725 struct ib_mac_iocb_rsp *ib_mac_rsp)
1726{
1727 struct bq_desc *lbq_desc;
1728 struct bq_desc *sbq_desc;
1729 struct sk_buff *skb = NULL;
1730 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1731 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1732
1733 /*
1734 * Handle the header buffer if present.
1735 */
1736 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1737 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001738 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1739 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001740 /*
1741 * Headers fit nicely into a small buffer.
1742 */
1743 sbq_desc = ql_get_curr_sbuf(rx_ring);
1744 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001745 dma_unmap_addr(sbq_desc, mapaddr),
1746 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001747 PCI_DMA_FROMDEVICE);
1748 skb = sbq_desc->p.skb;
1749 ql_realign_skb(skb, hdr_len);
1750 skb_put(skb, hdr_len);
1751 sbq_desc->p.skb = NULL;
1752 }
1753
1754 /*
1755 * Handle the data buffer(s).
1756 */
1757 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001758 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1759 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001760 return skb;
1761 }
1762
1763 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1764 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 "Headers in small, data of %d bytes in small, combine them.\n",
1767 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001768 /*
1769 * Data is less than small buffer size so it's
1770 * stuffed in a small buffer.
1771 * For this case we append the data
1772 * from the "data" small buffer to the "header" small
1773 * buffer.
1774 */
1775 sbq_desc = ql_get_curr_sbuf(rx_ring);
1776 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001777 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001778 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001779 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001780 (sbq_desc, maplen),
1781 PCI_DMA_FROMDEVICE);
1782 memcpy(skb_put(skb, length),
1783 sbq_desc->p.skb->data, length);
1784 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001785 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001786 (sbq_desc,
1787 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001788 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001789 (sbq_desc,
1790 maplen),
1791 PCI_DMA_FROMDEVICE);
1792 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001793 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1794 "%d bytes in a single small buffer.\n",
1795 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001796 sbq_desc = ql_get_curr_sbuf(rx_ring);
1797 skb = sbq_desc->p.skb;
1798 ql_realign_skb(skb, length);
1799 skb_put(skb, length);
1800 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001801 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001802 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001803 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001804 maplen),
1805 PCI_DMA_FROMDEVICE);
1806 sbq_desc->p.skb = NULL;
1807 }
1808 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1809 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001810 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1811 "Header in small, %d bytes in large. Chain large to small!\n",
1812 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001813 /*
1814 * The data is in a single large buffer. We
1815 * chain it to the header buffer's skb and let
1816 * it rip.
1817 */
Ron Mercer7c734352009-10-19 03:32:19 +00001818 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "Chaining page at offset = %d, for %d bytes to skb.\n",
1821 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001822 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1823 lbq_desc->p.pg_chunk.offset,
1824 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001825 skb->len += length;
1826 skb->data_len += length;
1827 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001828 } else {
1829 /*
1830 * The headers and data are in a single large buffer. We
1831 * copy it to a new skb and let it go. This can happen with
1832 * jumbo mtu on a non-TCP/UDP frame.
1833 */
Ron Mercer7c734352009-10-19 03:32:19 +00001834 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001835 skb = netdev_alloc_skb(qdev->ndev, length);
1836 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001837 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1838 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001839 return NULL;
1840 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001841 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001842 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001843 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001844 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001845 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001846 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1848 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1849 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001850 skb_fill_page_desc(skb, 0,
1851 lbq_desc->p.pg_chunk.page,
1852 lbq_desc->p.pg_chunk.offset,
1853 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001854 skb->len += length;
1855 skb->data_len += length;
1856 skb->truesize += length;
1857 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001858 __pskb_pull_tail(skb,
1859 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1860 VLAN_ETH_HLEN : ETH_HLEN);
1861 }
1862 } else {
1863 /*
1864 * The data is in a chain of large buffers
1865 * pointed to by a small buffer. We loop
 1866		 * through and chain them to our small header
 1867		 * buffer's skb.
 1868		 * frags: an skb holds at most 18 frags and our small
 1869		 * buffer holds a list of 32 chunk addresses. We need
 1870		 * at most 3 of them for our 9000 byte jumbo
 1871		 * frames, but if the MTU goes up we could
 1872		 * eventually exceed the 18-frag limit.
1873 */
Ron Mercer7c734352009-10-19 03:32:19 +00001874 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001875 sbq_desc = ql_get_curr_sbuf(rx_ring);
1876 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001877 dma_unmap_addr(sbq_desc, mapaddr),
1878 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001879 PCI_DMA_FROMDEVICE);
1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1881 /*
 1882			 * This is a non-TCP/UDP IP frame, so
1883 * the headers aren't split into a small
1884 * buffer. We have to use the small buffer
1885 * that contains our sg list as our skb to
1886 * send upstairs. Copy the sg list here to
1887 * a local buffer and use it to find the
1888 * pages to chain.
1889 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001890 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1891 "%d bytes of headers & data in chain of large.\n",
1892 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001893 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001894 sbq_desc->p.skb = NULL;
1895 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896 }
1897 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001898 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1899 size = (length < rx_ring->lbq_buf_size) ? length :
1900 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001901
Joe Perchesae9540f72010-02-09 11:49:52 +00001902 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1903 "Adding page %d to skb for %d bytes.\n",
1904 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001905 skb_fill_page_desc(skb, i,
1906 lbq_desc->p.pg_chunk.page,
1907 lbq_desc->p.pg_chunk.offset,
1908 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001909 skb->len += size;
1910 skb->data_len += size;
1911 skb->truesize += size;
1912 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001913 i++;
1914 }
1915 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1916 VLAN_ETH_HLEN : ETH_HLEN);
1917 }
1918 return skb;
1919}
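
/*
 * Worked example for the frag budget discussed in the comment above (the
 * chunk size is an assumption for illustration): with 4096-byte large
 * buffer chunks, a 9000-byte jumbo frame spans DIV_ROUND_UP(9000, 4096) = 3
 * chunks, well under the 18-frag skb limit.  Hypothetical helper:
 */
static inline unsigned int example_chunks_needed(unsigned int frame_len,
						 unsigned int chunk_size)
{
	return DIV_ROUND_UP(frame_len, chunk_size);
}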
1920
1921/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001922static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001923 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001924 struct ib_mac_iocb_rsp *ib_mac_rsp,
1925 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001926{
1927 struct net_device *ndev = qdev->ndev;
1928 struct sk_buff *skb = NULL;
1929
1930 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1931
1932 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1933 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001934 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1935 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001936 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001937 return;
1938 }
1939
Ron Mercera32959c2009-06-09 05:39:27 +00001940 /* Frame error, so drop the packet. */
1941 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001942 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001943 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001944 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001945 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001946 return;
1947 }
Ron Mercerec33a492009-06-09 05:39:28 +00001948
1949 /* The max framesize filter on this chip is set higher than
1950 * MTU since FCoE uses 2k frames.
1951 */
1952 if (skb->len > ndev->mtu + ETH_HLEN) {
1953 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001954 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001955 return;
1956 }
1957
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001958 /* loopback self test for ethtool */
1959 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1960 ql_check_lb_frame(qdev, skb);
1961 dev_kfree_skb_any(skb);
1962 return;
1963 }
1964
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001965 prefetch(skb->data);
1966 skb->dev = ndev;
1967 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001968 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001975 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001976 }
1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001980 }
Ron Mercerd555f592009-03-09 10:59:19 +00001981
Ron Mercerd555f592009-03-09 10:59:19 +00001982 skb->protocol = eth_type_trans(skb, ndev);
1983 skb->ip_summed = CHECKSUM_NONE;
1984
1985 /* If rx checksum is on, and there are no
1986 * csum or frame errors.
1987 */
1988 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001989 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1990 /* TCP frame. */
1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001994 skb->ip_summed = CHECKSUM_UNNECESSARY;
1995 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997 /* Unfragmented ipv4 UDP frame. */
1998 struct iphdr *iph = (struct iphdr *) skb->data;
1999 if (!(iph->frag_off &
2000 cpu_to_be16(IP_MF|IP_OFFSET))) {
2001 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2003				     "UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002004 }
2005 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002006 }
Ron Mercerd555f592009-03-09 10:59:19 +00002007
Ron Mercer885ee392009-11-03 13:49:31 +00002008 rx_ring->rx_packets++;
2009 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002010 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002011 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2012 if (qdev->vlgrp &&
2013 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2014 (vlan_id != 0))
2015 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2016 vlan_id, skb);
2017 else
2018 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002019 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002020 if (qdev->vlgrp &&
2021 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2022 (vlan_id != 0))
2023 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2024 else
2025 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002026 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002027}
2028
Ron Mercer4f848c02010-01-02 10:37:43 +00002029/* Process an inbound completion from an rx ring. */
2030static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031 struct rx_ring *rx_ring,
2032 struct ib_mac_iocb_rsp *ib_mac_rsp)
2033{
2034 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2038
2039 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2040
2041 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042 /* The data and headers are split into
2043 * separate buffers.
2044 */
2045 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2046 vlan_id);
2047 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048 /* The data fit in a single small buffer.
2049 * Allocate a new skb, copy the data and
2050 * return the buffer to the free pool.
2051 */
2052 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2053 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002054 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057 /* TCP packet in a page chunk that's been checksummed.
2058 * Tack it on to our GRO skb and let it go.
2059 */
2060 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2061 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002062 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063 /* Non-TCP packet in a page chunk. Allocate an
2064 * skb, tack it on frags, and send it up.
2065 */
2066 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2067 length, vlan_id);
2068 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002069 /* Non-TCP/UDP large frames that span multiple buffers
 2070		 * can be processed correctly by the split frame logic.
2071 */
2072 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002074 }
2075
2076 return (unsigned long)length;
2077}
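
/*
 * Summary of the dispatch above (descriptive only):
 *   - header/data split (IB_MAC_IOCB_RSP_HV)     -> ql_process_mac_split_rx_intr()
 *   - whole frame in one small buffer (RSP_DS)   -> ql_process_mac_rx_skb()
 *   - checksummed TCP in a page chunk (RSP_DL+T) -> ql_process_mac_rx_gro_page()
 *   - other data in a page chunk (RSP_DL)        -> ql_process_mac_rx_page()
 *   - anything else                              -> split frame logic
 */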
2078
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002079/* Process an outbound completion from an rx ring. */
2080static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2081 struct ob_mac_iocb_rsp *mac_rsp)
2082{
2083 struct tx_ring *tx_ring;
2084 struct tx_ring_desc *tx_ring_desc;
2085
2086 QL_DUMP_OB_MAC_RSP(mac_rsp);
2087 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2088 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2089 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002090 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2091 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002092 dev_kfree_skb(tx_ring_desc->skb);
2093 tx_ring_desc->skb = NULL;
2094
2095 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2096 OB_MAC_IOCB_RSP_S |
2097 OB_MAC_IOCB_RSP_L |
2098 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2099 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002100 netif_warn(qdev, tx_done, qdev->ndev,
2101 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002102 }
2103 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002104 netif_warn(qdev, tx_done, qdev->ndev,
2105 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002106 }
2107 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002108 netif_warn(qdev, tx_done, qdev->ndev,
2109 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002110 }
2111 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002112 netif_warn(qdev, tx_done, qdev->ndev,
2113 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002114 }
2115 }
2116 atomic_inc(&tx_ring->tx_count);
2117}
2118
2119/* Fire up a handler to reset the MPI processor. */
2120void ql_queue_fw_error(struct ql_adapter *qdev)
2121{
Ron Mercer6a473302009-07-02 06:06:12 +00002122 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2124}
2125
2126void ql_queue_asic_error(struct ql_adapter *qdev)
2127{
Ron Mercer6a473302009-07-02 06:06:12 +00002128 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002129 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002130 /* Clear adapter up bit to signal the recovery
2131 * process that it shouldn't kill the reset worker
2132 * thread
2133 */
2134 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002135 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2136}
2137
2138static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2139 struct ib_ae_iocb_rsp *ib_ae_rsp)
2140{
2141 switch (ib_ae_rsp->event) {
2142 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002143 netif_err(qdev, rx_err, qdev->ndev,
2144 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002145 ql_queue_fw_error(qdev);
2146 return;
2147
2148 case CAM_LOOKUP_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002149 netif_err(qdev, link, qdev->ndev,
2150 "Multiple CAM hits lookup occurred.\n");
2151 netif_err(qdev, drv, qdev->ndev,
2152 "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002153 ql_queue_asic_error(qdev);
2154 return;
2155
2156 case SOFT_ECC_ERROR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002157 netif_err(qdev, rx_err, qdev->ndev,
2158 "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002159 ql_queue_asic_error(qdev);
2160 break;
2161
2162 case PCI_ERR_ANON_BUF_RD:
Joe Perchesae9540f72010-02-09 11:49:52 +00002163 netif_err(qdev, rx_err, qdev->ndev,
2164 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2165 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 ql_queue_asic_error(qdev);
2167 break;
2168
2169 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002170 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2171 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002172 ql_queue_asic_error(qdev);
2173 break;
2174 }
2175}
2176
2177static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2178{
2179 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002180 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002181 struct ob_mac_iocb_rsp *net_rsp = NULL;
2182 int count = 0;
2183
Ron Mercer1e213302009-03-09 10:59:21 +00002184 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002185 /* While there are entries in the completion queue. */
2186 while (prod != rx_ring->cnsmr_idx) {
2187
Joe Perchesae9540f72010-02-09 11:49:52 +00002188 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2189			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2190 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002191
2192 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2193 rmb();
2194 switch (net_rsp->opcode) {
2195
2196 case OPCODE_OB_MAC_TSO_IOCB:
2197 case OPCODE_OB_MAC_IOCB:
2198 ql_process_mac_tx_intr(qdev, net_rsp);
2199 break;
2200 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002201 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2202 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2203 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002204 }
2205 count++;
2206 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002207 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002208 }
2209 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002210	if (net_rsp != NULL) {
 2211		tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
 2212		if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002213		    atomic_read(&tx_ring->queue_stopped) &&
 2214		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
 2215			/*
 2216			 * The queue got stopped because the tx_ring was full.
 2217			 * Wake it up, because it's now at least 25% empty.
 2218			 */
Ron Mercer1e213302009-03-09 10:59:21 +00002219			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002220	}
2221
2222 return count;
2223}
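
/*
 * Sketch of the wake-up threshold applied above; 128 is an assumed example
 * ring size.  With a 128-entry work queue, the stopped subqueue is woken
 * once more than 128 / 4 = 32 descriptors are free again.  Hypothetical
 * helper, illustrative only:
 */
static inline int example_should_wake_tx(int free_descs, int wq_len)
{
	return free_descs > (wq_len / 4);
}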
2224
2225static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2226{
2227 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002228 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002229 struct ql_net_rsp_iocb *net_rsp;
2230 int count = 0;
2231
2232 /* While there are entries in the completion queue. */
2233 while (prod != rx_ring->cnsmr_idx) {
2234
Joe Perchesae9540f72010-02-09 11:49:52 +00002235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2236			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2237 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002238
2239 net_rsp = rx_ring->curr_entry;
2240 rmb();
2241 switch (net_rsp->opcode) {
2242 case OPCODE_IB_MAC_IOCB:
2243 ql_process_mac_rx_intr(qdev, rx_ring,
2244 (struct ib_mac_iocb_rsp *)
2245 net_rsp);
2246 break;
2247
2248 case OPCODE_IB_AE_IOCB:
2249 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2250 net_rsp);
2251 break;
2252 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002253 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2254 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2255 net_rsp->opcode);
2256 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002257 }
2258 count++;
2259 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002260 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002261 if (count == budget)
2262 break;
2263 }
2264 ql_update_buffer_queues(qdev, rx_ring);
2265 ql_write_cq_idx(rx_ring);
2266 return count;
2267}
2268
2269static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2270{
2271 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2272 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002273 struct rx_ring *trx_ring;
2274 int i, work_done = 0;
2275 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002276
Joe Perchesae9540f72010-02-09 11:49:52 +00002277 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2278 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002279
Ron Mercer39aa8162009-08-27 11:02:11 +00002280 /* Service the TX rings first. They start
2281 * right after the RSS rings. */
2282 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2283 trx_ring = &qdev->rx_ring[i];
2284 /* If this TX completion ring belongs to this vector and
2285 * it's not empty then service it.
2286 */
2287 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2288 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2289 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002290 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2291 "%s: Servicing TX completion ring %d.\n",
2292 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002293 ql_clean_outbound_rx_ring(trx_ring);
2294 }
2295 }
2296
2297 /*
2298 * Now service the RSS ring if it's active.
2299 */
2300 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2301 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002302 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2303 "%s: Servicing RX completion ring %d.\n",
2304 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002305 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2306 }
2307
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002308 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002309 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002310 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2311 }
2312 return work_done;
2313}
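
/*
 * The poll routine above follows the standard NAPI contract: consume at
 * most 'budget' completions, and only when less than the budget was used
 * call napi_complete() and re-arm the interrupt.  A minimal sketch of that
 * contract; the name and the re-arm hook are illustrative assumptions.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... service completions here, incrementing work_done ... */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable the device's completion interrupt here */
	}
	return work_done;
}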
2314
Ron Mercer01e6b952009-10-30 12:13:34 +00002315static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002316{
2317 struct ql_adapter *qdev = netdev_priv(ndev);
2318
2319 qdev->vlgrp = grp;
2320 if (grp) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002321 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2322 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002323 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2324 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2325 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00002326 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2327 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2329 }
2330}
2331
Ron Mercer01e6b952009-10-30 12:13:34 +00002332static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002333{
2334 struct ql_adapter *qdev = netdev_priv(ndev);
2335 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002336 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002337
Ron Mercercc288f52009-02-23 10:42:14 +00002338 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2339 if (status)
2340 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002341 if (ql_set_mac_addr_reg
2342 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002343 netif_err(qdev, ifup, qdev->ndev,
2344 "Failed to init vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002345 }
Ron Mercercc288f52009-02-23 10:42:14 +00002346 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002347}
2348
Ron Mercer01e6b952009-10-30 12:13:34 +00002349static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002350{
2351 struct ql_adapter *qdev = netdev_priv(ndev);
2352 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002353 int status;
2354
2355 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2356 if (status)
2357 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002358
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002359 if (ql_set_mac_addr_reg
2360 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002361 netif_err(qdev, ifup, qdev->ndev,
2362 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002363 }
Ron Mercercc288f52009-02-23 10:42:14 +00002364 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002365
2366}
2367
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002368/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2369static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2370{
2371 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002372 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002373 return IRQ_HANDLED;
2374}
2375
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002376/* This handles a fatal error, MPI activity, and the default
2377 * rx_ring in an MSI-X multiple vector environment.
2378 * In MSI/Legacy environment it also process the rest of
2379 * the rx_rings.
2380 */
2381static irqreturn_t qlge_isr(int irq, void *dev_id)
2382{
2383 struct rx_ring *rx_ring = dev_id;
2384 struct ql_adapter *qdev = rx_ring->qdev;
2385 struct intr_context *intr_context = &qdev->intr_context[0];
2386 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002387 int work_done = 0;
2388
Ron Mercerbb0d2152008-10-20 10:30:26 -07002389 spin_lock(&qdev->hw_lock);
2390 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002391 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2392 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002393 spin_unlock(&qdev->hw_lock);
2394 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002395 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002396 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002397
Ron Mercerbb0d2152008-10-20 10:30:26 -07002398 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002399
2400 /*
2401 * Check for fatal error.
2402 */
2403 if (var & STS_FE) {
2404 ql_queue_asic_error(qdev);
Joe Perchesae9540f72010-02-09 11:49:52 +00002405 netif_err(qdev, intr, qdev->ndev,
2406 "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002407 var = ql_read32(qdev, ERR_STS);
Joe Perchesae9540f72010-02-09 11:49:52 +00002408 netif_err(qdev, intr, qdev->ndev,
2409 "Resetting chip. Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002410 return IRQ_HANDLED;
2411 }
2412
2413 /*
2414 * Check MPI processor activity.
2415 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002416 if ((var & STS_PI) &&
2417 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002418 /*
2419 * We've got an async event or mailbox completion.
2420 * Handle it and clear the source of the interrupt.
2421 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002422 netif_err(qdev, intr, qdev->ndev,
2423 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002424 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002425 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2426 queue_delayed_work_on(smp_processor_id(),
2427 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002428 work_done++;
2429 }
2430
2431 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002432 * Get the bit-mask that shows the active queues for this
2433 * pass. Compare it to the queues that this irq services
2434 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002435 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002436 var = ql_read32(qdev, ISR1);
2437 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002438 netif_info(qdev, intr, qdev->ndev,
2439 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002440 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002441 napi_schedule(&rx_ring->napi);
2442 work_done++;
2443 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002444 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002445 return work_done ? IRQ_HANDLED : IRQ_NONE;
2446}
2447
2448static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2449{
2450
2451 if (skb_is_gso(skb)) {
2452 int err;
2453 if (skb_header_cloned(skb)) {
2454 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2455 if (err)
2456 return err;
2457 }
2458
2459 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2460 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2461 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2462 mac_iocb_ptr->total_hdrs_len =
2463 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2464 mac_iocb_ptr->net_trans_offset =
2465 cpu_to_le16(skb_network_offset(skb) |
2466 skb_transport_offset(skb)
2467 << OB_MAC_TRANSPORT_HDR_SHIFT);
2468 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2469 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2470 if (likely(skb->protocol == htons(ETH_P_IP))) {
2471 struct iphdr *iph = ip_hdr(skb);
2472 iph->check = 0;
2473 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2474 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2475 iph->daddr, 0,
2476 IPPROTO_TCP,
2477 0);
2478 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2479 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2480 tcp_hdr(skb)->check =
2481 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2482 &ipv6_hdr(skb)->daddr,
2483 0, IPPROTO_TCP, 0);
2484 }
2485 return 1;
2486 }
2487 return 0;
2488}
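
/*
 * Note on the TSO checksum seeding above (descriptive sketch only): the
 * TCP checksum field is primed with a pseudo-header sum computed over a
 * zero length, because the hardware re-derives the per-segment payload
 * length while it cuts the super-frame into MSS-sized segments.
 */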
2489
2490static void ql_hw_csum_setup(struct sk_buff *skb,
2491 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2492{
2493 int len;
2494 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002495 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002496 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2497 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2498 mac_iocb_ptr->net_trans_offset =
2499 cpu_to_le16(skb_network_offset(skb) |
2500 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2501
2502 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2503 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2504 if (likely(iph->protocol == IPPROTO_TCP)) {
2505 check = &(tcp_hdr(skb)->check);
2506 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2507 mac_iocb_ptr->total_hdrs_len =
2508 cpu_to_le16(skb_transport_offset(skb) +
2509 (tcp_hdr(skb)->doff << 2));
2510 } else {
2511 check = &(udp_hdr(skb)->check);
2512 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2513 mac_iocb_ptr->total_hdrs_len =
2514 cpu_to_le16(skb_transport_offset(skb) +
2515 sizeof(struct udphdr));
2516 }
2517 *check = ~csum_tcpudp_magic(iph->saddr,
2518 iph->daddr, len, iph->protocol, 0);
2519}
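
/*
 * Worked example for the length fed to csum_tcpudp_magic() above: with an
 * IPv4 total length of 1500 and a 5-word (20-byte) header, the L4 length
 * is 1500 - (5 << 2) = 1480 bytes.  Hypothetical helper, illustrative only:
 */
static inline int example_l4_len(const struct iphdr *iph)
{
	return ntohs(iph->tot_len) - (iph->ihl << 2);
}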
2520
Stephen Hemminger613573252009-08-31 19:50:58 +00002521static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002522{
2523 struct tx_ring_desc *tx_ring_desc;
2524 struct ob_mac_iocb_req *mac_iocb_ptr;
2525 struct ql_adapter *qdev = netdev_priv(ndev);
2526 int tso;
2527 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002528 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002529
2530 tx_ring = &qdev->tx_ring[tx_ring_idx];
2531
Ron Mercer74c50b42009-03-09 10:59:27 +00002532 if (skb_padto(skb, ETH_ZLEN))
2533 return NETDEV_TX_OK;
2534
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002535 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002536 netif_info(qdev, tx_queued, qdev->ndev,
 2537			   "%s: shutting down tx queue %d due to lack of resources.\n",
2538 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002539 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002540 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002541 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002542 return NETDEV_TX_BUSY;
2543 }
2544 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2545 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002546 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002547
2548 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2549 mac_iocb_ptr->tid = tx_ring_desc->index;
2550 /* We use the upper 32-bits to store the tx queue for this IO.
2551 * When we get the completion we can use it to establish the context.
2552 */
2553 mac_iocb_ptr->txq_idx = tx_ring_idx;
2554 tx_ring_desc->skb = skb;
2555
2556 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2557
2558 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002559 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2560 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002561 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2562 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2563 }
2564 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2565 if (tso < 0) {
2566 dev_kfree_skb_any(skb);
2567 return NETDEV_TX_OK;
2568 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2569 ql_hw_csum_setup(skb,
2570 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2571 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002572 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2573 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002574 netif_err(qdev, tx_queued, qdev->ndev,
2575 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002576 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002577 return NETDEV_TX_BUSY;
2578 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002579 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2580 tx_ring->prod_idx++;
2581 if (tx_ring->prod_idx == tx_ring->wq_len)
2582 tx_ring->prod_idx = 0;
2583 wmb();
2584
2585 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002586 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2587 "tx queued, slot %d, len %d\n",
2588 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002589
2590 atomic_dec(&tx_ring->tx_count);
2591 return NETDEV_TX_OK;
2592}
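
/*
 * Sketch of the producer-index advance used in qlge_send() above: the
 * index simply wraps to zero when it reaches the work-queue length.  The
 * helper name is illustrative only.
 */
static inline u16 example_advance_prod_idx(u16 prod_idx, u16 wq_len)
{
	return (prod_idx + 1 == wq_len) ? 0 : prod_idx + 1;
}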
2593
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002594
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002595static void ql_free_shadow_space(struct ql_adapter *qdev)
2596{
2597 if (qdev->rx_ring_shadow_reg_area) {
2598 pci_free_consistent(qdev->pdev,
2599 PAGE_SIZE,
2600 qdev->rx_ring_shadow_reg_area,
2601 qdev->rx_ring_shadow_reg_dma);
2602 qdev->rx_ring_shadow_reg_area = NULL;
2603 }
2604 if (qdev->tx_ring_shadow_reg_area) {
2605 pci_free_consistent(qdev->pdev,
2606 PAGE_SIZE,
2607 qdev->tx_ring_shadow_reg_area,
2608 qdev->tx_ring_shadow_reg_dma);
2609 qdev->tx_ring_shadow_reg_area = NULL;
2610 }
2611}
2612
2613static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2614{
2615 qdev->rx_ring_shadow_reg_area =
2616 pci_alloc_consistent(qdev->pdev,
2617 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2618 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002619 netif_err(qdev, ifup, qdev->ndev,
2620 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002621 return -ENOMEM;
2622 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002623 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002624 qdev->tx_ring_shadow_reg_area =
2625 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2626 &qdev->tx_ring_shadow_reg_dma);
2627 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002628 netif_err(qdev, ifup, qdev->ndev,
2629 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002630 goto err_wqp_sh_area;
2631 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002632 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002633 return 0;
2634
2635err_wqp_sh_area:
2636 pci_free_consistent(qdev->pdev,
2637 PAGE_SIZE,
2638 qdev->rx_ring_shadow_reg_area,
2639 qdev->rx_ring_shadow_reg_dma);
2640 return -ENOMEM;
2641}
2642
2643static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2644{
2645 struct tx_ring_desc *tx_ring_desc;
2646 int i;
2647 struct ob_mac_iocb_req *mac_iocb_ptr;
2648
2649 mac_iocb_ptr = tx_ring->wq_base;
2650 tx_ring_desc = tx_ring->q;
2651 for (i = 0; i < tx_ring->wq_len; i++) {
2652 tx_ring_desc->index = i;
2653 tx_ring_desc->skb = NULL;
2654 tx_ring_desc->queue_entry = mac_iocb_ptr;
2655 mac_iocb_ptr++;
2656 tx_ring_desc++;
2657 }
2658 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2659 atomic_set(&tx_ring->queue_stopped, 0);
2660}
2661
2662static void ql_free_tx_resources(struct ql_adapter *qdev,
2663 struct tx_ring *tx_ring)
2664{
2665 if (tx_ring->wq_base) {
2666 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2667 tx_ring->wq_base, tx_ring->wq_base_dma);
2668 tx_ring->wq_base = NULL;
2669 }
2670 kfree(tx_ring->q);
2671 tx_ring->q = NULL;
2672}
2673
2674static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2675 struct tx_ring *tx_ring)
2676{
2677 tx_ring->wq_base =
2678 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2679 &tx_ring->wq_base_dma);
2680
Joe Perches8e95a202009-12-03 07:58:21 +00002681 if ((tx_ring->wq_base == NULL) ||
2682 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002683 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002684 return -ENOMEM;
2685 }
2686 tx_ring->q =
2687 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2688 if (tx_ring->q == NULL)
2689 goto err;
2690
2691 return 0;
2692err:
2693 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2694 tx_ring->wq_base, tx_ring->wq_base_dma);
2695 return -ENOMEM;
2696}
2697
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002698static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002699{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002700 struct bq_desc *lbq_desc;
2701
Ron Mercer7c734352009-10-19 03:32:19 +00002702 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002703
Ron Mercer7c734352009-10-19 03:32:19 +00002704 curr_idx = rx_ring->lbq_curr_idx;
2705 clean_idx = rx_ring->lbq_clean_idx;
2706 while (curr_idx != clean_idx) {
2707 lbq_desc = &rx_ring->lbq[curr_idx];
2708
2709 if (lbq_desc->p.pg_chunk.last_flag) {
2710 pci_unmap_page(qdev->pdev,
2711 lbq_desc->p.pg_chunk.map,
2712 ql_lbq_block_size(qdev),
2713 PCI_DMA_FROMDEVICE);
2714 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002715 }
Ron Mercer7c734352009-10-19 03:32:19 +00002716
2717 put_page(lbq_desc->p.pg_chunk.page);
2718 lbq_desc->p.pg_chunk.page = NULL;
2719
2720 if (++curr_idx == rx_ring->lbq_len)
2721 curr_idx = 0;
2722
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002723 }
2724}
2725
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002726static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002727{
2728 int i;
2729 struct bq_desc *sbq_desc;
2730
2731 for (i = 0; i < rx_ring->sbq_len; i++) {
2732 sbq_desc = &rx_ring->sbq[i];
2733 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002734 netif_err(qdev, ifup, qdev->ndev,
2735 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002736 return;
2737 }
2738 if (sbq_desc->p.skb) {
2739 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002740 dma_unmap_addr(sbq_desc, mapaddr),
2741 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002742 PCI_DMA_FROMDEVICE);
2743 dev_kfree_skb(sbq_desc->p.skb);
2744 sbq_desc->p.skb = NULL;
2745 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002746 }
2747}
2748
Ron Mercer4545a3f2009-02-23 10:42:17 +00002749/* Free all large and small rx buffers associated
2750 * with the completion queues for this device.
2751 */
2752static void ql_free_rx_buffers(struct ql_adapter *qdev)
2753{
2754 int i;
2755 struct rx_ring *rx_ring;
2756
2757 for (i = 0; i < qdev->rx_ring_count; i++) {
2758 rx_ring = &qdev->rx_ring[i];
2759 if (rx_ring->lbq)
2760 ql_free_lbq_buffers(qdev, rx_ring);
2761 if (rx_ring->sbq)
2762 ql_free_sbq_buffers(qdev, rx_ring);
2763 }
2764}
2765
2766static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2767{
2768 struct rx_ring *rx_ring;
2769 int i;
2770
2771 for (i = 0; i < qdev->rx_ring_count; i++) {
2772 rx_ring = &qdev->rx_ring[i];
2773 if (rx_ring->type != TX_Q)
2774 ql_update_buffer_queues(qdev, rx_ring);
2775 }
2776}
2777
2778static void ql_init_lbq_ring(struct ql_adapter *qdev,
2779 struct rx_ring *rx_ring)
2780{
2781 int i;
2782 struct bq_desc *lbq_desc;
2783 __le64 *bq = rx_ring->lbq_base;
2784
2785 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2786 for (i = 0; i < rx_ring->lbq_len; i++) {
2787 lbq_desc = &rx_ring->lbq[i];
2788 memset(lbq_desc, 0, sizeof(*lbq_desc));
2789 lbq_desc->index = i;
2790 lbq_desc->addr = bq;
2791 bq++;
2792 }
2793}
2794
2795static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002796 struct rx_ring *rx_ring)
2797{
2798 int i;
2799 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002800 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002801
Ron Mercer4545a3f2009-02-23 10:42:17 +00002802 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002803 for (i = 0; i < rx_ring->sbq_len; i++) {
2804 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002805 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002806 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002807 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002808 bq++;
2809 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002810}
2811
2812static void ql_free_rx_resources(struct ql_adapter *qdev,
2813 struct rx_ring *rx_ring)
2814{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002815 /* Free the small buffer queue. */
2816 if (rx_ring->sbq_base) {
2817 pci_free_consistent(qdev->pdev,
2818 rx_ring->sbq_size,
2819 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2820 rx_ring->sbq_base = NULL;
2821 }
2822
2823 /* Free the small buffer queue control blocks. */
2824 kfree(rx_ring->sbq);
2825 rx_ring->sbq = NULL;
2826
2827 /* Free the large buffer queue. */
2828 if (rx_ring->lbq_base) {
2829 pci_free_consistent(qdev->pdev,
2830 rx_ring->lbq_size,
2831 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2832 rx_ring->lbq_base = NULL;
2833 }
2834
2835 /* Free the large buffer queue control blocks. */
2836 kfree(rx_ring->lbq);
2837 rx_ring->lbq = NULL;
2838
2839 /* Free the rx queue. */
2840 if (rx_ring->cq_base) {
2841 pci_free_consistent(qdev->pdev,
2842 rx_ring->cq_size,
2843 rx_ring->cq_base, rx_ring->cq_base_dma);
2844 rx_ring->cq_base = NULL;
2845 }
2846}
2847
 2848/* Allocate queues and buffers for this completion queue based
2849 * on the values in the parameter structure. */
2850static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2851 struct rx_ring *rx_ring)
2852{
2853
2854 /*
2855 * Allocate the completion queue for this rx_ring.
2856 */
2857 rx_ring->cq_base =
2858 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2859 &rx_ring->cq_base_dma);
2860
2861 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002862 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002863 return -ENOMEM;
2864 }
2865
2866 if (rx_ring->sbq_len) {
2867 /*
2868 * Allocate small buffer queue.
2869 */
2870 rx_ring->sbq_base =
2871 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2872 &rx_ring->sbq_base_dma);
2873
2874 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002875 netif_err(qdev, ifup, qdev->ndev,
2876 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002877 goto err_mem;
2878 }
2879
2880 /*
2881 * Allocate small buffer queue control blocks.
2882 */
2883 rx_ring->sbq =
2884 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2885 GFP_KERNEL);
2886 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002887 netif_err(qdev, ifup, qdev->ndev,
2888 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002889 goto err_mem;
2890 }
2891
Ron Mercer4545a3f2009-02-23 10:42:17 +00002892 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002893 }
2894
2895 if (rx_ring->lbq_len) {
2896 /*
2897 * Allocate large buffer queue.
2898 */
2899 rx_ring->lbq_base =
2900 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2901 &rx_ring->lbq_base_dma);
2902
2903 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002904 netif_err(qdev, ifup, qdev->ndev,
2905 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002906 goto err_mem;
2907 }
2908 /*
2909 * Allocate large buffer queue control blocks.
2910 */
2911 rx_ring->lbq =
2912 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2913 GFP_KERNEL);
2914 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002915 netif_err(qdev, ifup, qdev->ndev,
2916 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002917 goto err_mem;
2918 }
2919
Ron Mercer4545a3f2009-02-23 10:42:17 +00002920 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002921 }
2922
2923 return 0;
2924
2925err_mem:
2926 ql_free_rx_resources(qdev, rx_ring);
2927 return -ENOMEM;
2928}
2929
2930static void ql_tx_ring_clean(struct ql_adapter *qdev)
2931{
2932 struct tx_ring *tx_ring;
2933 struct tx_ring_desc *tx_ring_desc;
2934 int i, j;
2935
2936 /*
2937 * Loop through all queues and free
2938 * any resources.
2939 */
2940 for (j = 0; j < qdev->tx_ring_count; j++) {
2941 tx_ring = &qdev->tx_ring[j];
2942 for (i = 0; i < tx_ring->wq_len; i++) {
2943 tx_ring_desc = &tx_ring->q[i];
2944 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002945 netif_err(qdev, ifdown, qdev->ndev,
2946 "Freeing lost SKB %p, from queue %d, index %d.\n",
2947 tx_ring_desc->skb, j,
2948 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002949 ql_unmap_send(qdev, tx_ring_desc,
2950 tx_ring_desc->map_cnt);
2951 dev_kfree_skb(tx_ring_desc->skb);
2952 tx_ring_desc->skb = NULL;
2953 }
2954 }
2955 }
2956}
2957
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002958static void ql_free_mem_resources(struct ql_adapter *qdev)
2959{
2960 int i;
2961
2962 for (i = 0; i < qdev->tx_ring_count; i++)
2963 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2964 for (i = 0; i < qdev->rx_ring_count; i++)
2965 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2966 ql_free_shadow_space(qdev);
2967}
2968
2969static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2970{
2971 int i;
2972
2973 /* Allocate space for our shadow registers and such. */
2974 if (ql_alloc_shadow_space(qdev))
2975 return -ENOMEM;
2976
2977 for (i = 0; i < qdev->rx_ring_count; i++) {
2978 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002979 netif_err(qdev, ifup, qdev->ndev,
2980 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002981 goto err_mem;
2982 }
2983 }
2984 /* Allocate tx queue resources */
2985 for (i = 0; i < qdev->tx_ring_count; i++) {
2986 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002987 netif_err(qdev, ifup, qdev->ndev,
2988 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002989 goto err_mem;
2990 }
2991 }
2992 return 0;
2993
2994err_mem:
2995 ql_free_mem_resources(qdev);
2996 return -ENOMEM;
2997}
2998
2999/* Set up the rx ring control block and pass it to the chip.
3000 * The control block is defined as
3001 * "Completion Queue Initialization Control Block", or cqicb.
3002 */
3003static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3004{
3005 struct cqicb *cqicb = &rx_ring->cqicb;
3006 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003007 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003008 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003009 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003010 void __iomem *doorbell_area =
3011 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
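	/* Illustrative sketch of the math above (byte values assumed, not
	 * taken from the databook): each completion queue appears to own one
	 * DB_PAGE_SIZE doorbell page, starting 128 pages into the doorbell
	 * BAR.  If DB_PAGE_SIZE were 4KB, cq_id 0 would map to byte offset
	 * 128 * 4096 = 0x80000 and cq_id 2 to 130 * 4096 = 0x82000.
	 */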
3012 int err = 0;
3013 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003014 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003015 __le64 *base_indirect_ptr;
3016 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003017
3018 /* Set up the shadow registers for this ring. */
3019 rx_ring->prod_idx_sh_reg = shadow_reg;
3020 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003021 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003022 shadow_reg += sizeof(u64);
3023 shadow_reg_dma += sizeof(u64);
3024 rx_ring->lbq_base_indirect = shadow_reg;
3025 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003026 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3027 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003028 rx_ring->sbq_base_indirect = shadow_reg;
3029 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3030
3031 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003032 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003033 rx_ring->cnsmr_idx = 0;
3034 rx_ring->curr_entry = rx_ring->cq_base;
3035
3036 /* PCI doorbell mem area + 0x04 for valid register */
3037 rx_ring->valid_db_reg = doorbell_area + 0x04;
3038
3039 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003040 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003041
3042 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003043 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003044
3045 memset((void *)cqicb, 0, sizeof(struct cqicb));
3046 cqicb->msix_vect = rx_ring->irq;
3047
Ron Mercer459caf52009-01-04 17:08:11 -08003048 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3049 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
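	/* Hedged reading of the encoding above: the length field is only
	 * 16 bits wide, so the 65536-entry maximum presumably cannot be
	 * expressed directly and 0 stands in for it; e.g. cq_len = 1024 is
	 * written as 1024, while cq_len = 65536 is written as 0.
	 */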
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003050
Ron Mercer97345522009-01-09 11:31:50 +00003051 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003052
Ron Mercer97345522009-01-09 11:31:50 +00003053 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003054
3055 /*
3056 * Set up the control block load flags.
3057 */
3058 cqicb->flags = FLAGS_LC | /* Load queue base address */
3059 FLAGS_LV | /* Load MSI-X vector */
3060 FLAGS_LI; /* Load irq delay values */
3061 if (rx_ring->lbq_len) {
3062 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003063 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003064 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3065 page_entries = 0;
3066 do {
3067 *base_indirect_ptr = cpu_to_le64(tmp);
3068 tmp += DB_PAGE_SIZE;
3069 base_indirect_ptr++;
3070 page_entries++;
3071 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
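		/* Worked example (sizes assumed for illustration): each
		 * indirect entry written above is the bus address of one
		 * DB_PAGE_SIZE chunk of the large buffer queue.  With a 4KB
		 * DB_PAGE_SIZE and 8-byte queue entries, one page holds 512
		 * entries, so a 1024-entry lbq needs MAX_DB_PAGES_PER_BQ = 2
		 * indirect pointers, which is exactly two loop iterations.
		 */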
Ron Mercer97345522009-01-09 11:31:50 +00003072 cqicb->lbq_addr =
3073 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003074 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3075 (u16) rx_ring->lbq_buf_size;
3076 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3077 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3078 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003079 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003080 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003081 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003082 rx_ring->lbq_clean_idx = 0;
3083 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003084 }
3085 if (rx_ring->sbq_len) {
3086 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003087 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003088 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3089 page_entries = 0;
3090 do {
3091 *base_indirect_ptr = cpu_to_le64(tmp);
3092 tmp += DB_PAGE_SIZE;
3093 base_indirect_ptr++;
3094 page_entries++;
3095 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003096 cqicb->sbq_addr =
3097 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003098 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003099 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003100 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3101 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003102 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003103 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003104 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003105 rx_ring->sbq_clean_idx = 0;
3106 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003107 }
3108 switch (rx_ring->type) {
3109 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003110 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3111 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3112 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003113 case RX_Q:
3114 /* Inbound completion handling rx_rings run in
3115 * separate NAPI contexts.
3116 */
3117 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3118 64);
3119 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3120 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3121 break;
3122 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003123 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3124 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003125 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003126 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3127 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003128 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3129 CFG_LCQ, rx_ring->cq_id);
3130 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003131 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003132 return err;
3133 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003134 return err;
3135}
3136
3137static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3138{
3139 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3140 void __iomem *doorbell_area =
3141 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3142 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3143 (tx_ring->wq_id * sizeof(u64));
3144 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3145 (tx_ring->wq_id * sizeof(u64));
3146 int err = 0;
3147
3148 /*
3149 * Assign doorbell registers for this tx_ring.
3150 */
3151 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003152 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003153 tx_ring->prod_idx = 0;
3154 /* TX PCI doorbell mem area + 0x04 */
3155 tx_ring->valid_db_reg = doorbell_area + 0x04;
3156
3157 /*
3158 * Assign shadow registers for this tx_ring.
3159 */
3160 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3161 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3162
3163 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3164 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3165 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3166 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3167 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003168 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003169
Ron Mercer97345522009-01-09 11:31:50 +00003170 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003171
3172 ql_init_tx_ring(qdev, tx_ring);
3173
Ron Mercere3324712009-07-02 06:06:13 +00003174 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003175 (u16) tx_ring->wq_id);
3176 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003177 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003178 return err;
3179 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003180 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3181 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003182 return err;
3183}
3184
3185static void ql_disable_msix(struct ql_adapter *qdev)
3186{
3187 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3188 pci_disable_msix(qdev->pdev);
3189 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3190 kfree(qdev->msi_x_entry);
3191 qdev->msi_x_entry = NULL;
3192 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3193 pci_disable_msi(qdev->pdev);
3194 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3195 }
3196}
3197
Ron Mercera4ab6132009-08-27 11:02:10 +00003198/* We start by trying to get the number of vectors
3199 * stored in qdev->intr_count. If we don't get that
3200 * many then we reduce the count and try again.
3201 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003202static void ql_enable_msix(struct ql_adapter *qdev)
3203{
Ron Mercera4ab6132009-08-27 11:02:10 +00003204 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003205
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003206 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003207 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003208 /* Try to alloc space for the msix struct,
3209 * if it fails then go to MSI/legacy.
3210 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003211 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003212 sizeof(struct msix_entry),
3213 GFP_KERNEL);
3214 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003215 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003216 goto msi;
3217 }
3218
Ron Mercera4ab6132009-08-27 11:02:10 +00003219 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003220 qdev->msi_x_entry[i].entry = i;
3221
Ron Mercera4ab6132009-08-27 11:02:10 +00003222 /* Loop to get our vectors. We start with
3223 * what we want and settle for what we get.
3224 */
3225 do {
3226 err = pci_enable_msix(qdev->pdev,
3227 qdev->msi_x_entry, qdev->intr_count);
3228 if (err > 0)
3229 qdev->intr_count = err;
3230 } while (err > 0);
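		/* Hypothetical walk-through of the loop above: if intr_count
		 * starts at 8 but only 4 MSI-X vectors are available,
		 * pci_enable_msix() returns 4 (the number it could grant),
		 * intr_count is trimmed to 4, and the next pass returns 0,
		 * so the driver settles for 4 vectors.
		 */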
3231
3232 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003233 kfree(qdev->msi_x_entry);
3234 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003235 netif_warn(qdev, ifup, qdev->ndev,
3236 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003237 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003238 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003239 } else if (err == 0) {
3240 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003241 netif_info(qdev, ifup, qdev->ndev,
3242 "MSI-X Enabled, got %d vectors.\n",
3243 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003244 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003245 }
3246 }
3247msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003248 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003249 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003250 if (!pci_enable_msi(qdev->pdev)) {
3251 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003252 netif_info(qdev, ifup, qdev->ndev,
3253 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003254 return;
3255 }
3256 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003257 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003258 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3259 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003260}
3261
Ron Mercer39aa8162009-08-27 11:02:11 +00003262/* Each vector services 1 RSS ring and 1 or more
3263 * TX completion rings. This function loops through
3264 * the TX completion rings and assigns the vector that
3265 * will service it. An example would be if there are
3266 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3267 * This would mean that vector 0 would service RSS ring 0
 3268 * and TX completion rings 0,1,2 and 3. Vector 1 would
3269 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3270 */
3271static void ql_set_tx_vect(struct ql_adapter *qdev)
3272{
3273 int i, j, vect;
3274 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3275
3276 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3277 /* Assign irq vectors to TX rx_rings.*/
3278 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3279 i < qdev->rx_ring_count; i++) {
3280 if (j == tx_rings_per_vector) {
3281 vect++;
3282 j = 0;
3283 }
3284 qdev->rx_ring[i].irq = vect;
3285 j++;
3286 }
3287 } else {
3288 /* For single vector all rings have an irq
3289 * of zero.
3290 */
3291 for (i = 0; i < qdev->rx_ring_count; i++)
3292 qdev->rx_ring[i].irq = 0;
3293 }
3294}
3295
3296/* Set the interrupt mask for this vector. Each vector
3297 * will service 1 RSS ring and 1 or more TX completion
3298 * rings. This function sets up a bit mask per vector
3299 * that indicates which rings it services.
3300 */
3301static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3302{
3303 int j, vect = ctx->intr;
3304 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3305
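	/* Illustrative mask layout (hypothetical counts): with 2 MSI-X
	 * vectors, rss_ring_count = 2 and tx_ring_count = 8, each vector
	 * covers 4 TX completion rings.  For vect = 1 the RSS bit is
	 * (1 << rx_ring[1].cq_id) = bit 1, and the TX bits come from
	 * rx_ring[2 + 4 + 0..3], i.e. cq_ids 6..9, for a final mask of
	 * 0x3c2.
	 */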
3306 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3307 /* Add the RSS ring serviced by this vector
3308 * to the mask.
3309 */
3310 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3311 /* Add the TX ring(s) serviced by this vector
3312 * to the mask. */
3313 for (j = 0; j < tx_rings_per_vector; j++) {
3314 ctx->irq_mask |=
3315 (1 << qdev->rx_ring[qdev->rss_ring_count +
3316 (vect * tx_rings_per_vector) + j].cq_id);
3317 }
3318 } else {
3319 /* For single vector we just shift each queue's
3320 * ID into the mask.
3321 */
3322 for (j = 0; j < qdev->rx_ring_count; j++)
3323 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3324 }
3325}
3326
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003327/*
3328 * Here we build the intr_context structures based on
3329 * our rx_ring count and intr vector count.
3330 * The intr_context structure is used to hook each vector
3331 * to possibly different handlers.
3332 */
3333static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3334{
3335 int i = 0;
3336 struct intr_context *intr_context = &qdev->intr_context[0];
3337
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003338 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3339 /* Each rx_ring has its
3340 * own intr_context since we have separate
3341 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003342 */
3343 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3344 qdev->rx_ring[i].irq = i;
3345 intr_context->intr = i;
3346 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003347 /* Set up this vector's bit-mask that indicates
3348 * which queues it services.
3349 */
3350 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003351 /*
3352 * We set up each vectors enable/disable/read bits so
3353 * there's no bit/mask calculations in the critical path.
3354 */
3355 intr_context->intr_en_mask =
3356 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3357 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3358 | i;
3359 intr_context->intr_dis_mask =
3360 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3361 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3362 INTR_EN_IHD | i;
3363 intr_context->intr_read_mask =
3364 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3365 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3366 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003367 if (i == 0) {
3368 /* The first vector/queue handles
3369 * broadcast/multicast, fatal errors,
3370 * and firmware events. This in addition
3371 * to normal inbound NAPI processing.
3372 */
3373 intr_context->handler = qlge_isr;
3374 sprintf(intr_context->name, "%s-rx-%d",
3375 qdev->ndev->name, i);
3376 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003377 /*
3378 * Inbound queues handle unicast frames only.
3379 */
3380 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003381 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003382 qdev->ndev->name, i);
3383 }
3384 }
3385 } else {
3386 /*
3387 * All rx_rings use the same intr_context since
3388 * there is only one vector.
3389 */
3390 intr_context->intr = 0;
3391 intr_context->qdev = qdev;
3392 /*
 3393 * We set up each vector's enable/disable/read bits so
3394 * there's no bit/mask calculations in the critical path.
3395 */
3396 intr_context->intr_en_mask =
3397 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3398 intr_context->intr_dis_mask =
3399 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3400 INTR_EN_TYPE_DISABLE;
3401 intr_context->intr_read_mask =
3402 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3403 /*
3404 * Single interrupt means one handler for all rings.
3405 */
3406 intr_context->handler = qlge_isr;
3407 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003408 /* Set up this vector's bit-mask that indicates
3409 * which queues it services. In this case there is
3410 * a single vector so it will service all RSS and
3411 * TX completion rings.
3412 */
3413 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003414 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003415 /* Tell the TX completion rings which MSIx vector
3416 * they will be using.
3417 */
3418 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003419}
3420
3421static void ql_free_irq(struct ql_adapter *qdev)
3422{
3423 int i;
3424 struct intr_context *intr_context = &qdev->intr_context[0];
3425
3426 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3427 if (intr_context->hooked) {
3428 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3429 free_irq(qdev->msi_x_entry[i].vector,
3430 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003431 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3432 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003433 } else {
3434 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003435 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3436 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003437 }
3438 }
3439 }
3440 ql_disable_msix(qdev);
3441}
3442
3443static int ql_request_irq(struct ql_adapter *qdev)
3444{
3445 int i;
3446 int status = 0;
3447 struct pci_dev *pdev = qdev->pdev;
3448 struct intr_context *intr_context = &qdev->intr_context[0];
3449
3450 ql_resolve_queues_to_irqs(qdev);
3451
3452 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3453 atomic_set(&intr_context->irq_cnt, 0);
3454 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3455 status = request_irq(qdev->msi_x_entry[i].vector,
3456 intr_context->handler,
3457 0,
3458 intr_context->name,
3459 &qdev->rx_ring[i]);
3460 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003461 netif_err(qdev, ifup, qdev->ndev,
3462 "Failed request for MSIX interrupt %d.\n",
3463 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003464 goto err_irq;
3465 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003466 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3467 "Hooked intr %d, queue type %s, with name %s.\n",
3468 i,
3469 qdev->rx_ring[i].type == DEFAULT_Q ?
3470 "DEFAULT_Q" :
3471 qdev->rx_ring[i].type == TX_Q ?
3472 "TX_Q" :
3473 qdev->rx_ring[i].type == RX_Q ?
3474 "RX_Q" : "",
3475 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003476 }
3477 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003478 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3479 "trying msi or legacy interrupts.\n");
3480 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3481 "%s: irq = %d.\n", __func__, pdev->irq);
3482 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3483 "%s: context->name = %s.\n", __func__,
3484 intr_context->name);
3485 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3486 "%s: dev_id = 0x%p.\n", __func__,
3487 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003488 status =
3489 request_irq(pdev->irq, qlge_isr,
3490 test_bit(QL_MSI_ENABLED,
3491 &qdev->
3492 flags) ? 0 : IRQF_SHARED,
3493 intr_context->name, &qdev->rx_ring[0]);
3494 if (status)
3495 goto err_irq;
3496
Joe Perchesae9540f72010-02-09 11:49:52 +00003497 netif_err(qdev, ifup, qdev->ndev,
3498 "Hooked intr %d, queue type %s, with name %s.\n",
3499 i,
3500 qdev->rx_ring[0].type == DEFAULT_Q ?
3501 "DEFAULT_Q" :
3502 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3503 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3504 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003505 }
3506 intr_context->hooked = 1;
3507 }
3508 return status;
3509err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003510	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003511 ql_free_irq(qdev);
3512 return status;
3513}
3514
3515static int ql_start_rss(struct ql_adapter *qdev)
3516{
Ron Mercer541ae282009-10-08 09:54:37 +00003517 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3518 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3519 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3520 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3521 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3522 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003523 struct ricb *ricb = &qdev->ricb;
3524 int status = 0;
3525 int i;
3526 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3527
Ron Mercere3324712009-07-02 06:06:13 +00003528 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003529
Ron Mercerb2014ff2009-08-27 11:02:09 +00003530 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003531 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003532 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3533 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003534
3535 /*
3536 * Fill out the Indirection Table.
3537 */
Ron Mercer541ae282009-10-08 09:54:37 +00003538 for (i = 0; i < 1024; i++)
3539 hash_id[i] = (i & (qdev->rss_ring_count - 1));
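	/* Worked example: assuming rss_ring_count is a power of two, say 4,
	 * the 1024-entry indirection table cycles 0,1,2,3,0,1,... so hash
	 * results are spread evenly across the four inbound queues.
	 */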
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003540
Ron Mercer541ae282009-10-08 09:54:37 +00003541 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3542 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003543
Joe Perchesae9540f72010-02-09 11:49:52 +00003544 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003545
Ron Mercere3324712009-07-02 06:06:13 +00003546 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003547 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003548 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549 return status;
3550 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003551 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3552 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003553 return status;
3554}
3555
Ron Mercera5f59dc2009-07-02 06:06:07 +00003556static int ql_clear_routing_entries(struct ql_adapter *qdev)
3557{
3558 int i, status = 0;
3559
3560 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3561 if (status)
3562 return status;
3563 /* Clear all the entries in the routing table. */
3564 for (i = 0; i < 16; i++) {
3565 status = ql_set_routing_reg(qdev, i, 0, 0);
3566 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003567 netif_err(qdev, ifup, qdev->ndev,
3568 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003569 break;
3570 }
3571 }
3572 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3573 return status;
3574}
3575
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003576/* Initialize the frame-to-queue routing. */
3577static int ql_route_initialize(struct ql_adapter *qdev)
3578{
3579 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003580
3581 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003582 status = ql_clear_routing_entries(qdev);
3583 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003584 return status;
3585
3586 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3587 if (status)
3588 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003589
3590 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3591 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003592 netif_err(qdev, ifup, qdev->ndev,
3593 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003594 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003595 }
3596 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3597 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003598 netif_err(qdev, ifup, qdev->ndev,
3599 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003600 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003601 }
3602 /* If we have more than one inbound queue, then turn on RSS in the
3603 * routing block.
3604 */
3605 if (qdev->rss_ring_count > 1) {
3606 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3607 RT_IDX_RSS_MATCH, 1);
3608 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003609 netif_err(qdev, ifup, qdev->ndev,
3610 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003611 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003612 }
3613 }
3614
3615 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3616 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003617 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003618 netif_err(qdev, ifup, qdev->ndev,
3619 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003620exit:
3621 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003622 return status;
3623}
3624
Ron Mercer2ee1e272009-03-03 12:10:33 +00003625int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003626{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003627 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003628
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003629	/* Check if the link is up and use that to
3630 * determine if we are setting or clearing
3631 * the MAC address in the CAM.
3632 */
3633 set = ql_read32(qdev, STS);
3634 set &= qdev->port_link_up;
3635 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003636 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003637 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003638 return status;
3639 }
3640
3641 status = ql_route_initialize(qdev);
3642 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003643 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003644
3645 return status;
3646}
3647
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648static int ql_adapter_initialize(struct ql_adapter *qdev)
3649{
3650 u32 value, mask;
3651 int i;
3652 int status = 0;
3653
3654 /*
3655 * Set up the System register to halt on errors.
3656 */
3657 value = SYS_EFE | SYS_FAE;
3658 mask = value << 16;
3659 ql_write32(qdev, SYS, mask | value);
3660
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003661 /* Set the default queue, and VLAN behavior. */
3662 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3663 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003664 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3665
3666 /* Set the MPI interrupt to enabled. */
3667 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3668
3669 /* Enable the function, set pagesize, enable error checking. */
3670 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003671 FSC_EC | FSC_VM_PAGE_4K;
3672 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003673
3674 /* Set/clear header splitting. */
3675 mask = FSC_VM_PAGESIZE_MASK |
3676 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3677 ql_write32(qdev, FSC, mask | value);
3678
Ron Mercer572c5262010-01-02 10:37:42 +00003679 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003680
Ron Mercera3b71932009-10-08 09:54:38 +00003681 /* Set RX packet routing to use port/pci function on which the
3682 * packet arrived on in addition to usual frame routing.
3683 * This is helpful on bonding where both interfaces can have
3684 * the same MAC address.
3685 */
3686 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003687 /* Reroute all packets to our Interface.
3688 * They may have been routed to MPI firmware
3689 * due to WOL.
3690 */
3691 value = ql_read32(qdev, MGMT_RCV_CFG);
3692 value &= ~MGMT_RCV_CFG_RM;
3693 mask = 0xffff0000;
3694
3695 /* Sticky reg needs clearing due to WOL. */
3696 ql_write32(qdev, MGMT_RCV_CFG, mask);
3697 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3698
3699 /* Default WOL is enable on Mezz cards */
3700 if (qdev->pdev->subsystem_device == 0x0068 ||
3701 qdev->pdev->subsystem_device == 0x0180)
3702 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003703
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003704 /* Start up the rx queues. */
3705 for (i = 0; i < qdev->rx_ring_count; i++) {
3706 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3707 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003708 netif_err(qdev, ifup, qdev->ndev,
3709 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003710 return status;
3711 }
3712 }
3713
3714 /* If there is more than one inbound completion queue
3715 * then download a RICB to configure RSS.
3716 */
3717 if (qdev->rss_ring_count > 1) {
3718 status = ql_start_rss(qdev);
3719 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003720 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003721 return status;
3722 }
3723 }
3724
3725 /* Start up the tx queues. */
3726 for (i = 0; i < qdev->tx_ring_count; i++) {
3727 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3728 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003729 netif_err(qdev, ifup, qdev->ndev,
3730 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003731 return status;
3732 }
3733 }
3734
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003735 /* Initialize the port and set the max framesize. */
3736 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003737 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003738 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003739
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003740 /* Set up the MAC address and frame routing filter. */
3741 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003742 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003743 netif_err(qdev, ifup, qdev->ndev,
3744 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003745 return status;
3746 }
3747
3748 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003749 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003750 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3751 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003752 napi_enable(&qdev->rx_ring[i].napi);
3753 }
3754
3755 return status;
3756}
3757
3758/* Issue soft reset to chip. */
3759static int ql_adapter_reset(struct ql_adapter *qdev)
3760{
3761 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003762 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003763 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003764
Ron Mercera5f59dc2009-07-02 06:06:07 +00003765 /* Clear all the entries in the routing table. */
3766 status = ql_clear_routing_entries(qdev);
3767 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003768 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003769 return status;
3770 }
3771
3772 end_jiffies = jiffies +
3773 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003774
3775 /* Stop management traffic. */
3776 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3777
3778 /* Wait for the NIC and MGMNT FIFOs to empty. */
3779 ql_wait_fifo_empty(qdev);
3780
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003781 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003782
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003783 do {
3784 value = ql_read32(qdev, RST_FO);
3785 if ((value & RST_FO_FR) == 0)
3786 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003787 cpu_relax();
3788 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003789
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003790 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003791 netif_err(qdev, ifdown, qdev->ndev,
3792 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003793 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003794 }
3795
Ron Mercer84087f42009-10-08 09:54:41 +00003796 /* Resume management traffic. */
3797 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003798 return status;
3799}
3800
3801static void ql_display_dev_info(struct net_device *ndev)
3802{
3803 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3804
Joe Perchesae9540f72010-02-09 11:49:52 +00003805 netif_info(qdev, probe, qdev->ndev,
3806 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3807 "XG Roll = %d, XG Rev = %d.\n",
3808 qdev->func,
3809 qdev->port,
3810 qdev->chip_rev_id & 0x0000000f,
3811 qdev->chip_rev_id >> 4 & 0x0000000f,
3812 qdev->chip_rev_id >> 8 & 0x0000000f,
3813 qdev->chip_rev_id >> 12 & 0x0000000f);
3814 netif_info(qdev, probe, qdev->ndev,
3815 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003816}
3817
Ron Mercerbc083ce2009-10-21 11:07:40 +00003818int ql_wol(struct ql_adapter *qdev)
3819{
3820 int status = 0;
3821 u32 wol = MB_WOL_DISABLE;
3822
3823 /* The CAM is still intact after a reset, but if we
3824 * are doing WOL, then we may need to program the
3825 * routing regs. We would also need to issue the mailbox
3826 * commands to instruct the MPI what to do per the ethtool
3827 * settings.
3828 */
3829
3830 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3831 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003832 netif_err(qdev, ifdown, qdev->ndev,
3833 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3834 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003835 return -EINVAL;
3836 }
3837
3838 if (qdev->wol & WAKE_MAGIC) {
3839 status = ql_mb_wol_set_magic(qdev, 1);
3840 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003841 netif_err(qdev, ifdown, qdev->ndev,
3842 "Failed to set magic packet on %s.\n",
3843 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003844 return status;
3845 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003846 netif_info(qdev, drv, qdev->ndev,
3847 "Enabled magic packet successfully on %s.\n",
3848 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003849
3850 wol |= MB_WOL_MAGIC_PKT;
3851 }
3852
3853 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003854 wol |= MB_WOL_MODE_ON;
3855 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003856 netif_err(qdev, drv, qdev->ndev,
3857 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003858 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003859 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003860 }
3861
3862 return status;
3863}
3864
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003865static int ql_adapter_down(struct ql_adapter *qdev)
3866{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003867 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003868
Ron Mercer6a473302009-07-02 06:06:12 +00003869 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003870
Ron Mercer6497b602009-02-12 16:37:13 -08003871 /* Don't kill the reset worker thread if we
3872 * are in the process of recovery.
3873 */
3874 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3875 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003876 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3877 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003878 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003879 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003880 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003881
Ron Mercer39aa8162009-08-27 11:02:11 +00003882 for (i = 0; i < qdev->rss_ring_count; i++)
3883 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003884
3885 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3886
3887 ql_disable_interrupts(qdev);
3888
3889 ql_tx_ring_clean(qdev);
3890
Ron Mercer6b318cb2009-03-09 10:59:26 +00003891	/* Call netif_napi_del() from a common point.
3892 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003893 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003894 netif_napi_del(&qdev->rx_ring[i].napi);
3895
Ron Mercer4545a3f2009-02-23 10:42:17 +00003896 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003897
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003898 status = ql_adapter_reset(qdev);
3899 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003900 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3901 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003902 return status;
3903}
3904
3905static int ql_adapter_up(struct ql_adapter *qdev)
3906{
3907 int err = 0;
3908
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003909 err = ql_adapter_initialize(qdev);
3910 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003911 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003912 goto err_init;
3913 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003914 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003915 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003916 /* If the port is initialized and the
 3917 	 * link is up then turn on the carrier.
3918 */
3919 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3920 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003921 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003922 /* Restore rx mode. */
3923 clear_bit(QL_ALLMULTI, &qdev->flags);
3924 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3925 qlge_set_multicast_list(qdev->ndev);
3926
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003927 ql_enable_interrupts(qdev);
3928 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003929 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003930
3931 return 0;
3932err_init:
3933 ql_adapter_reset(qdev);
3934 return err;
3935}
3936
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003937static void ql_release_adapter_resources(struct ql_adapter *qdev)
3938{
3939 ql_free_mem_resources(qdev);
3940 ql_free_irq(qdev);
3941}
3942
3943static int ql_get_adapter_resources(struct ql_adapter *qdev)
3944{
3945 int status = 0;
3946
3947 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003948 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003949 return -ENOMEM;
3950 }
3951 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003952 return status;
3953}
3954
3955static int qlge_close(struct net_device *ndev)
3956{
3957 struct ql_adapter *qdev = netdev_priv(ndev);
3958
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003959 /* If we hit pci_channel_io_perm_failure
3960 * failure condition, then we already
3961 * brought the adapter down.
3962 */
3963 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003964 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003965 clear_bit(QL_EEH_FATAL, &qdev->flags);
3966 return 0;
3967 }
3968
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003969 /*
3970 * Wait for device to recover from a reset.
3971 * (Rarely happens, but possible.)
3972 */
3973 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3974 msleep(1);
3975 ql_adapter_down(qdev);
3976 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003977 return 0;
3978}
3979
3980static int ql_configure_rings(struct ql_adapter *qdev)
3981{
3982 int i;
3983 struct rx_ring *rx_ring;
3984 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003985 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003986 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3987 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3988
3989 qdev->lbq_buf_order = get_order(lbq_buf_len);
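	/* Hedged example of the sizing above: if LARGE_BUFFER_MAX_SIZE were
	 * 8KB, a jumbo (MTU > 1500) configuration would yield
	 * get_order(8192) = 1, i.e. two contiguous 4KB pages per large
	 * receive buffer; a standard MTU would use LARGE_BUFFER_MIN_SIZE
	 * instead.
	 */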
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003990
Ron Mercera4ab6132009-08-27 11:02:10 +00003991 /* In a perfect world we have one RSS ring for each CPU
 3992 	 * and each has its own vector. To do that we ask for
3993 * cpu_cnt vectors. ql_enable_msix() will adjust the
3994 * vector count to what we actually get. We then
3995 * allocate an RSS ring for each.
3996 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003997 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003998 qdev->intr_count = cpu_cnt;
3999 ql_enable_msix(qdev);
4000 /* Adjust the RSS ring count to the actual vector count. */
4001 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004002 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004003 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004004
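	/* Illustrative counts (hypothetical): with 8 online CPUs and all 8
	 * MSI-X vectors granted, rss_ring_count = 8, tx_ring_count = 8 and
	 * rx_ring_count = 16: eight inbound RSS rings plus eight
	 * outbound-completion rings, one per TX ring.
	 */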
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004005 for (i = 0; i < qdev->tx_ring_count; i++) {
4006 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004007 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004008 tx_ring->qdev = qdev;
4009 tx_ring->wq_id = i;
4010 tx_ring->wq_len = qdev->tx_ring_size;
4011 tx_ring->wq_size =
4012 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4013
4014 /*
 4015 		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004016 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004017 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004018 tx_ring->cq_id = qdev->rss_ring_count + i;
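		/* Example mapping (assuming rss_ring_count = 4): tx ring 0
		 * completes on cq 4, tx ring 1 on cq 5, and so on, while
		 * cq_ids 0..3 remain the inbound RSS completion queues.
		 */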
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004019 }
4020
4021 for (i = 0; i < qdev->rx_ring_count; i++) {
4022 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004023 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004024 rx_ring->qdev = qdev;
4025 rx_ring->cq_id = i;
4026 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004027 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004028 /*
4029 * Inbound (RSS) queues.
4030 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004031 rx_ring->cq_len = qdev->rx_ring_size;
4032 rx_ring->cq_size =
4033 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4034 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4035 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004036 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004037 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004038 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4039 "lbq_buf_size %d, order = %d\n",
4040 rx_ring->lbq_buf_size,
4041 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004042 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4043 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004044 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004045 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004046 rx_ring->type = RX_Q;
4047 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004048 /*
4049 * Outbound queue handles outbound completions only.
4050 */
4051 /* outbound cq is same size as tx_ring it services. */
4052 rx_ring->cq_len = qdev->tx_ring_size;
4053 rx_ring->cq_size =
4054 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4055 rx_ring->lbq_len = 0;
4056 rx_ring->lbq_size = 0;
4057 rx_ring->lbq_buf_size = 0;
4058 rx_ring->sbq_len = 0;
4059 rx_ring->sbq_size = 0;
4060 rx_ring->sbq_buf_size = 0;
4061 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004062 }
4063 }
4064 return 0;
4065}
4066
4067static int qlge_open(struct net_device *ndev)
4068{
4069 int err = 0;
4070 struct ql_adapter *qdev = netdev_priv(ndev);
4071
Ron Mercer74e12432009-11-11 12:54:04 +00004072 err = ql_adapter_reset(qdev);
4073 if (err)
4074 return err;
4075
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004076 err = ql_configure_rings(qdev);
4077 if (err)
4078 return err;
4079
4080 err = ql_get_adapter_resources(qdev);
4081 if (err)
4082 goto error_up;
4083
4084 err = ql_adapter_up(qdev);
4085 if (err)
4086 goto error_up;
4087
4088 return err;
4089
4090error_up:
4091 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004092 return err;
4093}
4094
Ron Mercer7c734352009-10-19 03:32:19 +00004095static int ql_change_rx_buffers(struct ql_adapter *qdev)
4096{
4097 struct rx_ring *rx_ring;
4098 int i, status;
4099 u32 lbq_buf_len;
4100
 4101 	/* Wait for an outstanding reset to complete. */
4102 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4103 int i = 3;
4104 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004105 netif_err(qdev, ifup, qdev->ndev,
4106 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004107 ssleep(1);
4108 }
4109
4110 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004111 netif_err(qdev, ifup, qdev->ndev,
4112 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004113 return -ETIMEDOUT;
4114 }
4115 }
4116
4117 status = ql_adapter_down(qdev);
4118 if (status)
4119 goto error;
4120
4121 /* Get the new rx buffer size. */
4122 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4123 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4124 qdev->lbq_buf_order = get_order(lbq_buf_len);
4125
4126 for (i = 0; i < qdev->rss_ring_count; i++) {
4127 rx_ring = &qdev->rx_ring[i];
4128 /* Set the new size. */
4129 rx_ring->lbq_buf_size = lbq_buf_len;
4130 }
4131
4132 status = ql_adapter_up(qdev);
4133 if (status)
4134 goto error;
4135
4136 return status;
4137error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004138 netif_alert(qdev, ifup, qdev->ndev,
4139 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004140 set_bit(QL_ADAPTER_UP, &qdev->flags);
4141 dev_close(qdev->ndev);
4142 return status;
4143}
4144
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004145static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4146{
4147 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004148 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004149
4150 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004151 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004152 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004153 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004154 } else
4155 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004156
4157 queue_delayed_work(qdev->workqueue,
4158 &qdev->mpi_port_cfg_work, 3*HZ);
4159
Breno Leitao746079d2010-02-04 10:11:19 +00004160 ndev->mtu = new_mtu;
4161
Ron Mercer7c734352009-10-19 03:32:19 +00004162 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004163 return 0;
4164 }
4165
Ron Mercer7c734352009-10-19 03:32:19 +00004166 status = ql_change_rx_buffers(qdev);
4167 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004168 netif_err(qdev, ifup, qdev->ndev,
4169 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004170 }
4171
4172 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004173}
4174
4175static struct net_device_stats *qlge_get_stats(struct net_device
4176 *ndev)
4177{
Ron Mercer885ee392009-11-03 13:49:31 +00004178 struct ql_adapter *qdev = netdev_priv(ndev);
4179 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4180 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4181 unsigned long pkts, mcast, dropped, errors, bytes;
4182 int i;
4183
4184 /* Get RX stats. */
4185 pkts = mcast = dropped = errors = bytes = 0;
4186 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4187 pkts += rx_ring->rx_packets;
4188 bytes += rx_ring->rx_bytes;
4189 dropped += rx_ring->rx_dropped;
4190 errors += rx_ring->rx_errors;
4191 mcast += rx_ring->rx_multicast;
4192 }
4193 ndev->stats.rx_packets = pkts;
4194 ndev->stats.rx_bytes = bytes;
4195 ndev->stats.rx_dropped = dropped;
4196 ndev->stats.rx_errors = errors;
4197 ndev->stats.multicast = mcast;
4198
4199 /* Get TX stats. */
4200 pkts = errors = bytes = 0;
4201 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4202 pkts += tx_ring->tx_packets;
4203 bytes += tx_ring->tx_bytes;
4204 errors += tx_ring->tx_errors;
4205 }
4206 ndev->stats.tx_packets = pkts;
4207 ndev->stats.tx_bytes = bytes;
4208 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004209 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004210}
4211
Ron Mercerf2c05002010-07-05 12:19:37 +00004212void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004213{
4214 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004215 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004216 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004217
Ron Mercercc288f52009-02-23 10:42:14 +00004218 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4219 if (status)
4220 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004221 /*
4222 * Set or clear promiscuous mode if a
4223 * transition is taking place.
4224 */
4225 if (ndev->flags & IFF_PROMISC) {
4226 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4227 if (ql_set_routing_reg
4228 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004229 netif_err(qdev, hw, qdev->ndev,
4230 "Failed to set promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004231 } else {
4232 set_bit(QL_PROMISCUOUS, &qdev->flags);
4233 }
4234 }
4235 } else {
4236 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4237 if (ql_set_routing_reg
4238 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004239 netif_err(qdev, hw, qdev->ndev,
4240 "Failed to clear promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004241 } else {
4242 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4243 }
4244 }
4245 }
4246
4247 /*
4248 * Set or clear all multicast mode if a
4249 * transition is taking place.
4250 */
4251 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004252 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004253 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4254 if (ql_set_routing_reg
4255 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004256 netif_err(qdev, hw, qdev->ndev,
4257 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004258 } else {
4259 set_bit(QL_ALLMULTI, &qdev->flags);
4260 }
4261 }
4262 } else {
4263 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4264 if (ql_set_routing_reg
4265 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004266 netif_err(qdev, hw, qdev->ndev,
4267 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004268 } else {
4269 clear_bit(QL_ALLMULTI, &qdev->flags);
4270 }
4271 }
4272 }
4273
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004274 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004275 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4276 if (status)
4277 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004278 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004279 netdev_for_each_mc_addr(ha, ndev) {
4280 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004281 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004282 netif_err(qdev, hw, qdev->ndev,
4283 "Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004284 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004285 goto exit;
4286 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004287 i++;
4288 }
Ron Mercercc288f52009-02-23 10:42:14 +00004289 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004290 if (ql_set_routing_reg
4291 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004292 netif_err(qdev, hw, qdev->ndev,
4293 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004294 } else {
4295 set_bit(QL_ALLMULTI, &qdev->flags);
4296 }
4297 }
4298exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004299 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004300}
4301
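/* Change the station MAC address: update the local copies, then program
 * the new address into the CAM entry used by this function.
 */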
4302static int qlge_set_mac_address(struct net_device *ndev, void *p)
4303{
4304 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4305 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004306 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004307
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004308 if (!is_valid_ether_addr(addr->sa_data))
4309 return -EADDRNOTAVAIL;
4310 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004311 /* Update local copy of current mac address. */
4312 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004313
Ron Mercercc288f52009-02-23 10:42:14 +00004314 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4315 if (status)
4316 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004317 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4318 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004319 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004320 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004321 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4322 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004323}
4324
4325static void qlge_tx_timeout(struct net_device *ndev)
4326{
4327 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004328 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004329}
4330
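/* ASIC reset worker: under the rtnl lock, cycle the adapter down and back
 * up and then restore the RX mode.  If the cycle fails, close the device.
 */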
4331static void ql_asic_reset_work(struct work_struct *work)
4332{
4333 struct ql_adapter *qdev =
4334 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004335 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004336 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004337 status = ql_adapter_down(qdev);
4338 if (status)
4339 goto error;
4340
4341 status = ql_adapter_up(qdev);
4342 if (status)
4343 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004344
4345 /* Restore rx mode. */
4346 clear_bit(QL_ALLMULTI, &qdev->flags);
4347 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4348 qlge_set_multicast_list(qdev->ndev);
4349
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004350 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004351 return;
4352error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004353 netif_alert(qdev, ifup, qdev->ndev,
4354 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004355
Ron Mercerdb988122009-03-09 10:59:17 +00004356 set_bit(QL_ADAPTER_UP, &qdev->flags);
4357 dev_close(qdev->ndev);
4358 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004359}
4360
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004361static struct nic_operations qla8012_nic_ops = {
4362 .get_flash = ql_get_8012_flash_params,
4363 .port_initialize = ql_8012_port_initialize,
4364};
4365
Ron Mercercdca8d02009-03-02 08:07:31 +00004366static struct nic_operations qla8000_nic_ops = {
4367 .get_flash = ql_get_8000_flash_params,
4368 .port_initialize = ql_8000_port_initialize,
4369};
4370
Ron Mercere4552f52009-06-09 05:39:32 +00004371/* Find the PCIe function number for the other NIC
4372 * on this chip. Since both NIC functions share a
4373 * common firmware, we have the lowest enabled function
4374 * do any common work. Examples would be resetting
4375 * after a fatal firmware error, or doing a firmware
4376 * coredump.
4377 */
4378static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004379{
Ron Mercere4552f52009-06-09 05:39:32 +00004380 int status = 0;
4381 u32 temp;
4382 u32 nic_func1, nic_func2;
4383
4384 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4385 &temp);
4386 if (status)
4387 return status;
4388
4389 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4390 MPI_TEST_NIC_FUNC_MASK);
4391 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4392 MPI_TEST_NIC_FUNC_MASK);
4393
4394 if (qdev->func == nic_func1)
4395 qdev->alt_func = nic_func2;
4396 else if (qdev->func == nic_func2)
4397 qdev->alt_func = nic_func1;
4398 else
4399 status = -EIO;
4400
4401 return status;
4402}
4403
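/* Read the function number from the status register and derive the port,
 * semaphore mask, mailbox addresses and nic_ops for this device.
 */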
4404static int ql_get_board_info(struct ql_adapter *qdev)
4405{
4406 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004407 qdev->func =
4408 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004409 if (qdev->func > 3)
4410 return -EIO;
4411
4412 status = ql_get_alt_pcie_func(qdev);
4413 if (status)
4414 return status;
4415
4416 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4417 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004418 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4419 qdev->port_link_up = STS_PL1;
4420 qdev->port_init = STS_PI1;
4421 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4422 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4423 } else {
4424 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4425 qdev->port_link_up = STS_PL0;
4426 qdev->port_init = STS_PI0;
4427 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4428 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4429 }
4430 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004431 qdev->device_id = qdev->pdev->device;
4432 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4433 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004434 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4435 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004436 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004437}
4438
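/* Undo ql_init_device(): destroy the workqueue, unmap the register and
 * doorbell BARs, free the MPI coredump buffer and release the PCI regions.
 */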
4439static void ql_release_all(struct pci_dev *pdev)
4440{
4441 struct net_device *ndev = pci_get_drvdata(pdev);
4442 struct ql_adapter *qdev = netdev_priv(ndev);
4443
4444 if (qdev->workqueue) {
4445 destroy_workqueue(qdev->workqueue);
4446 qdev->workqueue = NULL;
4447 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004448
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004449 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004450 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004451 if (qdev->doorbell_area)
4452 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004453 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004454 pci_release_regions(pdev);
4455 pci_set_drvdata(pdev, NULL);
4456}
4457
4458static int __devinit ql_init_device(struct pci_dev *pdev,
4459 struct net_device *ndev, int cards_found)
4460{
4461 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004462 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004463
Ron Mercere3324712009-07-02 06:06:13 +00004464 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004465 err = pci_enable_device(pdev);
4466 if (err) {
4467 dev_err(&pdev->dev, "PCI device enable failed.\n");
4468 return err;
4469 }
4470
Ron Mercerebd6e772009-09-29 08:39:25 +00004471 qdev->ndev = ndev;
4472 qdev->pdev = pdev;
4473 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004474
Ron Mercerbc9167f2009-10-10 09:35:04 +00004475 /* Set PCIe read request size */
4476 err = pcie_set_readrq(pdev, 4096);
4477 if (err) {
4478 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004479 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004480 }
4481
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004482 err = pci_request_regions(pdev, DRV_NAME);
4483 if (err) {
4484 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004485 goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004486 }
4487
4488 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004489 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004490 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004491 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004492 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004493 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004494 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004495 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004496 }
4497
4498 if (err) {
4499 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004500 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004501 }
4502
Ron Mercer73475332009-11-06 07:44:58 +00004503 /* Set PCIe reset type for EEH to fundamental. */
4504 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004505 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004506 qdev->reg_base =
4507 ioremap_nocache(pci_resource_start(pdev, 1),
4508 pci_resource_len(pdev, 1));
4509 if (!qdev->reg_base) {
4510 dev_err(&pdev->dev, "Register mapping failed.\n");
4511 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004512 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004513 }
4514
4515 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4516 qdev->doorbell_area =
4517 ioremap_nocache(pci_resource_start(pdev, 3),
4518 pci_resource_len(pdev, 3));
4519 if (!qdev->doorbell_area) {
4520 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4521 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004522 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004523 }
4524
Ron Mercere4552f52009-06-09 05:39:32 +00004525 err = ql_get_board_info(qdev);
4526 if (err) {
4527 dev_err(&pdev->dev, "Register access failed.\n");
4528 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004529 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004530 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004531 qdev->msg_enable = netif_msg_init(debug, default_msg);
4532 spin_lock_init(&qdev->hw_lock);
4533 spin_lock_init(&qdev->stats_lock);
4534
Ron Mercer8aae2602010-01-15 13:31:28 +00004535 if (qlge_mpi_coredump) {
4536 qdev->mpi_coredump =
4537 vmalloc(sizeof(struct ql_mpi_coredump));
4538 if (qdev->mpi_coredump == NULL) {
4539 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4540 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004541 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004542 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004543 if (qlge_force_coredump)
4544 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004545 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004546 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004547 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004548 if (err) {
4549 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004550 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004551 }
4552
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004553 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004554 /* Keep local copy of current mac address. */
4555 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004556
4557 /* Set up the default ring sizes. */
4558 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4559 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4560
4561 /* Set up the coalescing parameters. */
4562 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4563 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4564 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4565 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4566
4567 /*
4568 * Set up the operating parameters.
4569 */
4570 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004571 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4572 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4573 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4574 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004575 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004576 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004577 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004578 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004579
4580 if (!cards_found) {
4581 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4582 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4583 DRV_NAME, DRV_VERSION);
4584 }
4585 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004586err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004587 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004588err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004589 pci_disable_device(pdev);
4590 return err;
4591}
4592
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004593static const struct net_device_ops qlge_netdev_ops = {
4594 .ndo_open = qlge_open,
4595 .ndo_stop = qlge_close,
4596 .ndo_start_xmit = qlge_send,
4597 .ndo_change_mtu = qlge_change_mtu,
4598 .ndo_get_stats = qlge_get_stats,
4599 .ndo_set_multicast_list = qlge_set_multicast_list,
4600 .ndo_set_mac_address = qlge_set_mac_address,
4601 .ndo_validate_addr = eth_validate_addr,
4602 .ndo_tx_timeout = qlge_tx_timeout,
Ron Mercer01e6b952009-10-30 12:13:34 +00004603 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4604 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4605 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004606};
4607
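/* Heartbeat timer.  Re-reads the status register every 5 seconds so that a
 * dead PCI bus triggers EEH; stops rearming itself once the channel is
 * reported offline.
 */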
Ron Mercer15c052f2010-02-04 13:32:46 -08004608static void ql_timer(unsigned long data)
4609{
4610 struct ql_adapter *qdev = (struct ql_adapter *)data;
4611 u32 var = 0;
4612
4613 var = ql_read32(qdev, STS);
4614 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004615 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004616 return;
4617 }
4618
4619 qdev->timer.expires = jiffies + (5*HZ);
4620 add_timer(&qdev->timer);
4621}
4622
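/* PCI probe: allocate a multiqueue net_device, initialize the adapter,
 * register the netdev and start the heartbeat timer used for EEH detection.
 */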
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004623static int __devinit qlge_probe(struct pci_dev *pdev,
4624 const struct pci_device_id *pci_entry)
4625{
4626 struct net_device *ndev = NULL;
4627 struct ql_adapter *qdev = NULL;
4628 static int cards_found = 0;
4629 int err = 0;
4630
Ron Mercer1e213302009-03-09 10:59:21 +00004631 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4632 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004633 if (!ndev)
4634 return -ENOMEM;
4635
4636 err = ql_init_device(pdev, ndev, cards_found);
4637 if (err < 0) {
4638 free_netdev(ndev);
4639 return err;
4640 }
4641
4642 qdev = netdev_priv(ndev);
4643 SET_NETDEV_DEV(ndev, &pdev->dev);
4644 ndev->features = (0
4645 | NETIF_F_IP_CSUM
4646 | NETIF_F_SG
4647 | NETIF_F_TSO
4648 | NETIF_F_TSO6
4649 | NETIF_F_TSO_ECN
4650 | NETIF_F_HW_VLAN_TX
4651 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004652 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004653
4654 if (test_bit(QL_DMA64, &qdev->flags))
4655 ndev->features |= NETIF_F_HIGHDMA;
4656
4657 /*
4658 * Set up net_device structure.
4659 */
4660 ndev->tx_queue_len = qdev->tx_ring_size;
4661 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004662
4663 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004664 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004665 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004666
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004667 err = register_netdev(ndev);
4668 if (err) {
4669 dev_err(&pdev->dev, "net device registration failed.\n");
4670 ql_release_all(pdev);
4671 pci_disable_device(pdev);
4672 return err;
4673 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004674 /* Start up the timer to trigger EEH if
4675 * the bus goes dead
4676 */
4677 init_timer_deferrable(&qdev->timer);
4678 qdev->timer.data = (unsigned long)qdev;
4679 qdev->timer.function = ql_timer;
4680 qdev->timer.expires = jiffies + (5*HZ);
4681 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004682 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004683 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004684 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004685 cards_found++;
4686 return 0;
4687}
4688
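/* Thin wrappers around the normal send and rx-clean paths, used by the
 * loopback self-test.
 */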
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004689netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4690{
4691 return qlge_send(skb, ndev);
4692}
4693
4694int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4695{
4696 return ql_clean_inbound_rx_ring(rx_ring, budget);
4697}
4698
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004699static void __devexit qlge_remove(struct pci_dev *pdev)
4700{
4701 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004702 struct ql_adapter *qdev = netdev_priv(ndev);
4703 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004704 unregister_netdev(ndev);
4705 ql_release_all(pdev);
4706 pci_disable_device(pdev);
4707 free_netdev(ndev);
4708}
4709
Ron Mercer6d190c62009-10-28 08:39:20 +00004710/* Clean up resources without touching hardware. */
4711static void ql_eeh_close(struct net_device *ndev)
4712{
4713 int i;
4714 struct ql_adapter *qdev = netdev_priv(ndev);
4715
4716 if (netif_carrier_ok(ndev)) {
4717 netif_carrier_off(ndev);
4718 netif_stop_queue(ndev);
4719 }
4720
4721 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4722 cancel_delayed_work_sync(&qdev->asic_reset_work);
4723 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4724 cancel_delayed_work_sync(&qdev->mpi_work);
4725 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004726 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercer6d190c62009-10-28 08:39:20 +00004727 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4728
4729 for (i = 0; i < qdev->rss_ring_count; i++)
4730 netif_napi_del(&qdev->rx_ring[i].napi);
4731
4732 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4733 ql_tx_ring_clean(qdev);
4734 ql_free_rx_buffers(qdev);
4735 ql_release_adapter_resources(qdev);
4736}
4737
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004738/*
4739 * This callback is called by the PCI subsystem whenever
4740 * a PCI bus error is detected.
4741 */
4742static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4743 enum pci_channel_state state)
4744{
4745 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004746 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004747
Ron Mercer6d190c62009-10-28 08:39:20 +00004748 switch (state) {
4749 case pci_channel_io_normal:
4750 return PCI_ERS_RESULT_CAN_RECOVER;
4751 case pci_channel_io_frozen:
4752 netif_device_detach(ndev);
4753 if (netif_running(ndev))
4754 ql_eeh_close(ndev);
4755 pci_disable_device(pdev);
4756 return PCI_ERS_RESULT_NEED_RESET;
4757 case pci_channel_io_perm_failure:
4758 dev_err(&pdev->dev,
4759 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004760 ql_eeh_close(ndev);
4761 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004762 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004763 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004764
4765 /* Request a slot reset. */
4766 return PCI_ERS_RESULT_NEED_RESET;
4767}
4768
4769/*
4770 * This callback is called after the PCI bus has been reset.
4771 * Basically, this tries to restart the card from scratch.
4772 * This is a shortened version of the device probe/discovery code;
4773 * it resembles the first half of the probe routine.
4774 */
4775static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4776{
4777 struct net_device *ndev = pci_get_drvdata(pdev);
4778 struct ql_adapter *qdev = netdev_priv(ndev);
4779
Ron Mercer6d190c62009-10-28 08:39:20 +00004780 pdev->error_state = pci_channel_io_normal;
4781
4782 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004783 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004784 netif_err(qdev, ifup, qdev->ndev,
4785 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004786 return PCI_ERS_RESULT_DISCONNECT;
4787 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004788 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004789
4790 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004791 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004792 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004793 return PCI_ERS_RESULT_DISCONNECT;
4794 }
4795
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004796 return PCI_ERS_RESULT_RECOVERED;
4797}
4798
4799static void qlge_io_resume(struct pci_dev *pdev)
4800{
4801 struct net_device *ndev = pci_get_drvdata(pdev);
4802 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004803 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004804
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004805 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004806 err = qlge_open(ndev);
4807 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004808 netif_err(qdev, ifup, qdev->ndev,
4809 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004810 return;
4811 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004812 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004813 netif_err(qdev, ifup, qdev->ndev,
4814 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004815 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004816 qdev->timer.expires = jiffies + (5*HZ);
4817 add_timer(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004818 netif_device_attach(ndev);
4819}
4820
4821static struct pci_error_handlers qlge_err_handler = {
4822 .error_detected = qlge_io_error_detected,
4823 .slot_reset = qlge_io_slot_reset,
4824 .resume = qlge_io_resume,
4825};
4826
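/* Suspend: detach the netdev, bring the adapter down if it was running,
 * arm wake-on-LAN and put the device into the requested power state.
 */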
4827static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4828{
4829 struct net_device *ndev = pci_get_drvdata(pdev);
4830 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004831 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004832
4833 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004834 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004835
4836 if (netif_running(ndev)) {
4837 err = ql_adapter_down(qdev);
4838 if (err)
4839 return err;
4840 }
4841
Ron Mercerbc083ce2009-10-21 11:07:40 +00004842 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004843 err = pci_save_state(pdev);
4844 if (err)
4845 return err;
4846
4847 pci_disable_device(pdev);
4848
4849 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4850
4851 return 0;
4852}
4853
David S. Miller04da2cf2008-09-19 16:14:24 -07004854#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004855static int qlge_resume(struct pci_dev *pdev)
4856{
4857 struct net_device *ndev = pci_get_drvdata(pdev);
4858 struct ql_adapter *qdev = netdev_priv(ndev);
4859 int err;
4860
4861 pci_set_power_state(pdev, PCI_D0);
4862 pci_restore_state(pdev);
4863 err = pci_enable_device(pdev);
4864 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004865 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004866 return err;
4867 }
4868 pci_set_master(pdev);
4869
4870 pci_enable_wake(pdev, PCI_D3hot, 0);
4871 pci_enable_wake(pdev, PCI_D3cold, 0);
4872
4873 if (netif_running(ndev)) {
4874 err = ql_adapter_up(qdev);
4875 if (err)
4876 return err;
4877 }
4878
Ron Mercer15c052f2010-02-04 13:32:46 -08004879 qdev->timer.expires = jiffies + (5*HZ);
4880 add_timer(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004881 netif_device_attach(ndev);
4882
4883 return 0;
4884}
David S. Miller04da2cf2008-09-19 16:14:24 -07004885#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004886
4887static void qlge_shutdown(struct pci_dev *pdev)
4888{
4889 qlge_suspend(pdev, PMSG_SUSPEND);
4890}
4891
4892static struct pci_driver qlge_driver = {
4893 .name = DRV_NAME,
4894 .id_table = qlge_pci_tbl,
4895 .probe = qlge_probe,
4896 .remove = __devexit_p(qlge_remove),
4897#ifdef CONFIG_PM
4898 .suspend = qlge_suspend,
4899 .resume = qlge_resume,
4900#endif
4901 .shutdown = qlge_shutdown,
4902 .err_handler = &qlge_err_handler
4903};
4904
4905static int __init qlge_init_module(void)
4906{
4907 return pci_register_driver(&qlge_driver);
4908}
4909
4910static void __exit qlge_exit(void)
4911{
4912 pci_unregister_driver(&qlge_driver);
4913}
4914
4915module_init(qlge_init_module);
4916module_exit(qlge_exit);