/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

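	/* Request the semaphore by writing the set bits together with the
	 * mask; the set bits stick only if the semaphore was free.  Reading
	 * them back shows whether the hardware granted it, so this returns
	 * 0 on success and non-zero if another function holds the lock.
	 */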
	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used from kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

648{
649 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
650}
651
652static void ql_disable_interrupts(struct ql_adapter *qdev)
653{
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
655}
656
657/* If we're running with multiple MSI-X vectors then we enable on the fly.
658 * Otherwise, we may have multiple outstanding workers and don't want to
659 * enable until the last one finishes. In this case, the irq_cnt gets
660 * incremented everytime we queue a worker and decremented everytime
661 * a worker finishes. Once it hits zero we enable the interrupt.
662 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700663u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400664{
Ron Mercerbb0d2152008-10-20 10:30:26 -0700665 u32 var = 0;
666 unsigned long hw_flags = 0;
667 struct intr_context *ctx = qdev->intr_context + intr;
668
669 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
670 /* Always enable if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
672 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400673 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700674 ctx->intr_en_mask);
675 var = ql_read32(qdev, STS);
676 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400677 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700678
679 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
680 if (atomic_dec_and_test(&ctx->irq_cnt)) {
681 ql_write32(qdev, INTR_EN,
682 ctx->intr_en_mask);
683 var = ql_read32(qdev, STS);
684 }
685 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
686 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400687}
688
689static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
690{
691 u32 var = 0;
Ron Mercerbb0d2152008-10-20 10:30:26 -0700692 struct intr_context *ctx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400693
Ron Mercerbb0d2152008-10-20 10:30:26 -0700694 /* HW disables for us if we're MSIX multi interrupts and
695 * it's not the default (zeroeth) interrupt.
696 */
697 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
698 return 0;
699
700 ctx = qdev->intr_context + intr;
Ron Mercer08b1bc82009-03-09 10:59:23 +0000701 spin_lock(&qdev->hw_lock);
Ron Mercerbb0d2152008-10-20 10:30:26 -0700702 if (!atomic_read(&ctx->irq_cnt)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400703 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700704 ctx->intr_dis_mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400705 var = ql_read32(qdev, STS);
706 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700707 atomic_inc(&ctx->irq_cnt);
Ron Mercer08b1bc82009-03-09 10:59:23 +0000708 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400709 return var;
710}
711
712static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
713{
714 int i;
715 for (i = 0; i < qdev->intr_count; i++) {
716 /* The enable call does a atomic_dec_and_test
717 * and enables only if the result is zero.
718 * So we precharge it here.
719 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700720 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
721 i == 0))
722 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400723 ql_enable_completion_interrupt(qdev, i);
724 }
725
726}
727
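/* Sanity-check the flash image that was just read into qdev->flash: the
 * first four bytes must match the expected signature string passed in by
 * the caller ("8000" or "8012"), and the 16-bit sum over all words of the
 * image must come out to zero.
 */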
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return	status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

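/* Like ql_get_curr_lbuf(), but also syncs the page chunk for CPU access
 * and, when this chunk is the last one carved from its master page,
 * unmaps that page.
 */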
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

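/* Refill one large-buffer descriptor.  Large buffers are carved as
 * lbq_buf_size chunks out of a "master" compound page of order
 * lbq_buf_order; get_page() takes an extra reference for each chunk
 * handed out, and last_flag marks the chunk that finishes the page so
 * its mapping can be released later.
 */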
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
				dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
				*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring. */
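/* GRO path: the received page chunk is attached to the napi skb as a
 * fragment and handed to napi_gro_frags()/vlan_gro_frags() rather than
 * being copied.
 */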
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}
1511
1512/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001513static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1514 struct rx_ring *rx_ring,
1515 struct ib_mac_iocb_rsp *ib_mac_rsp,
1516 u32 length,
1517 u16 vlan_id)
1518{
1519 struct net_device *ndev = qdev->ndev;
1520 struct sk_buff *skb = NULL;
1521 void *addr;
1522 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1523 struct napi_struct *napi = &rx_ring->napi;
1524
1525 skb = netdev_alloc_skb(ndev, length);
1526 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001527 netif_err(qdev, drv, qdev->ndev,
1528 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001529 rx_ring->rx_dropped++;
1530 put_page(lbq_desc->p.pg_chunk.page);
1531 return;
1532 }
1533
1534 addr = lbq_desc->p.pg_chunk.va;
1535 prefetch(addr);
1536
1537
1538 /* Frame error, so drop the packet. */
1539 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001540 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001541 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001542 rx_ring->rx_errors++;
1543 goto err_out;
1544 }
1545
1546 /* The max framesize filter on this chip is set higher than
1547 * MTU since FCoE uses 2k frames.
1548 */
 1549	if (length > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001550		netif_err(qdev, drv, qdev->ndev,
 1551			  "Frame too long, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001552 rx_ring->rx_dropped++;
1553 goto err_out;
1554 }
1555 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001556 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1557 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1558 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001559 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1560 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1561 length-ETH_HLEN);
1562 skb->len += length-ETH_HLEN;
1563 skb->data_len += length-ETH_HLEN;
1564 skb->truesize += length-ETH_HLEN;
1565
1566 rx_ring->rx_packets++;
1567 rx_ring->rx_bytes += skb->len;
1568 skb->protocol = eth_type_trans(skb, ndev);
1569 skb->ip_summed = CHECKSUM_NONE;
1570
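	/* If rx checksum offload is enabled and the IOCB reported no
	 * checksum or frame errors, tell the stack the checksum has
	 * already been verified.
	 */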
1571 if (qdev->rx_csum &&
1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1573 /* TCP frame. */
1574 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001575 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1576 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001577 skb->ip_summed = CHECKSUM_UNNECESSARY;
1578 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1579 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1580 /* Unfragmented ipv4 UDP frame. */
1581 struct iphdr *iph = (struct iphdr *) skb->data;
1582 if (!(iph->frag_off &
1583 cpu_to_be16(IP_MF|IP_OFFSET))) {
1584 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001585 netif_printk(qdev, rx_status, KERN_DEBUG,
1586 qdev->ndev,
 1587				     "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001588 }
1589 }
1590 }
1591
1592 skb_record_rx_queue(skb, rx_ring->cq_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1594 if (qdev->vlgrp && (vlan_id != 0xffff))
1595 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1596 else
1597 napi_gro_receive(napi, skb);
1598 } else {
1599 if (qdev->vlgrp && (vlan_id != 0xffff))
1600 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1601 else
1602 netif_receive_skb(skb);
1603 }
1604 return;
1605err_out:
1606 dev_kfree_skb_any(skb);
1607 put_page(lbq_desc->p.pg_chunk.page);
1608}
1609
1610/* Process an inbound completion from an rx ring. */
1611static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1612 struct rx_ring *rx_ring,
1613 struct ib_mac_iocb_rsp *ib_mac_rsp,
1614 u32 length,
1615 u16 vlan_id)
1616{
1617 struct net_device *ndev = qdev->ndev;
1618 struct sk_buff *skb = NULL;
1619 struct sk_buff *new_skb = NULL;
1620 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1621
1622 skb = sbq_desc->p.skb;
1623 /* Allocate new_skb and copy */
1624 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1625 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001626 netif_err(qdev, probe, qdev->ndev,
1627 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001628 rx_ring->rx_dropped++;
1629 return;
1630 }
1631 skb_reserve(new_skb, NET_IP_ALIGN);
1632 memcpy(skb_put(new_skb, length), skb->data, length);
1633 skb = new_skb;
1634
1635 /* Frame error, so drop the packet. */
1636 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001637 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001638 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_errors++;
1641 return;
1642 }
1643
1644 /* loopback self test for ethtool */
1645 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1646 ql_check_lb_frame(qdev, skb);
1647 dev_kfree_skb_any(skb);
1648 return;
1649 }
1650
1651 /* The max framesize filter on this chip is set higher than
1652 * MTU since FCoE uses 2k frames.
1653 */
1654 if (skb->len > ndev->mtu + ETH_HLEN) {
1655 dev_kfree_skb_any(skb);
1656 rx_ring->rx_dropped++;
1657 return;
1658 }
1659
1660 prefetch(skb->data);
1661 skb->dev = ndev;
1662 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001663 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1664 "%s Multicast.\n",
1665 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1666 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1667 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1668 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1669 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001671 }
1672 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001673 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1674 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001675
1676 rx_ring->rx_packets++;
1677 rx_ring->rx_bytes += skb->len;
1678 skb->protocol = eth_type_trans(skb, ndev);
1679 skb->ip_summed = CHECKSUM_NONE;
1680
1681 /* If rx checksum is on, and there are no
1682 * csum or frame errors.
1683 */
1684 if (qdev->rx_csum &&
1685 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1686 /* TCP frame. */
1687 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001690 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1692 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1693 /* Unfragmented ipv4 UDP frame. */
1694 struct iphdr *iph = (struct iphdr *) skb->data;
1695 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001696 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001697 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001698 netif_printk(qdev, rx_status, KERN_DEBUG,
1699 qdev->ndev,
 1700				     "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001701 }
1702 }
1703 }
1704
1705 skb_record_rx_queue(skb, rx_ring->cq_id);
1706 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1707 if (qdev->vlgrp && (vlan_id != 0xffff))
1708 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1709 vlan_id, skb);
1710 else
1711 napi_gro_receive(&rx_ring->napi, skb);
1712 } else {
1713 if (qdev->vlgrp && (vlan_id != 0xffff))
1714 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1715 else
1716 netif_receive_skb(skb);
1717 }
1718}
1719
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001720static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001721{
1722 void *temp_addr = skb->data;
1723
1724 /* Undo the skb_reserve(skb,32) we did before
1725 * giving to hardware, and realign data on
1726 * a 2-byte boundary.
1727 */
1728 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1729 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1730 skb_copy_to_linear_data(skb, temp_addr,
1731 (unsigned int)len);
1732}
1733
1734/*
1735 * This function builds an skb for the given inbound
1736 * completion. It will be rewritten for readability in the near
 1737 * future, but for now it works well.
1738 */
1739static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1740 struct rx_ring *rx_ring,
1741 struct ib_mac_iocb_rsp *ib_mac_rsp)
1742{
1743 struct bq_desc *lbq_desc;
1744 struct bq_desc *sbq_desc;
1745 struct sk_buff *skb = NULL;
1746 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1747 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1748
1749 /*
1750 * Handle the header buffer if present.
1751 */
1752 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1753 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001754 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1755 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001756 /*
1757 * Headers fit nicely into a small buffer.
1758 */
1759 sbq_desc = ql_get_curr_sbuf(rx_ring);
1760 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001761 dma_unmap_addr(sbq_desc, mapaddr),
1762 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001763 PCI_DMA_FROMDEVICE);
1764 skb = sbq_desc->p.skb;
1765 ql_realign_skb(skb, hdr_len);
1766 skb_put(skb, hdr_len);
1767 sbq_desc->p.skb = NULL;
1768 }
1769
1770 /*
1771 * Handle the data buffer(s).
1772 */
1773 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001776 return skb;
1777 }
1778
1779 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1780 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001781 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1782 "Headers in small, data of %d bytes in small, combine them.\n",
1783 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001784 /*
1785 * Data is less than small buffer size so it's
1786 * stuffed in a small buffer.
1787 * For this case we append the data
1788 * from the "data" small buffer to the "header" small
1789 * buffer.
1790 */
1791 sbq_desc = ql_get_curr_sbuf(rx_ring);
1792 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001793 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001794 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001795 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001796 (sbq_desc, maplen),
1797 PCI_DMA_FROMDEVICE);
1798 memcpy(skb_put(skb, length),
1799 sbq_desc->p.skb->data, length);
1800 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001801 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001802 (sbq_desc,
1803 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001804 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 (sbq_desc,
1806 maplen),
1807 PCI_DMA_FROMDEVICE);
1808 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001809 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1810 "%d bytes in a single small buffer.\n",
1811 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001812 sbq_desc = ql_get_curr_sbuf(rx_ring);
1813 skb = sbq_desc->p.skb;
1814 ql_realign_skb(skb, length);
1815 skb_put(skb, length);
1816 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001817 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001818 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001819 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001820 maplen),
1821 PCI_DMA_FROMDEVICE);
1822 sbq_desc->p.skb = NULL;
1823 }
1824 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1825 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001826 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1827 "Header in small, %d bytes in large. Chain large to small!\n",
1828 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001829 /*
1830 * The data is in a single large buffer. We
1831 * chain it to the header buffer's skb and let
1832 * it rip.
1833 */
Ron Mercer7c734352009-10-19 03:32:19 +00001834 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001835 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1836 "Chaining page at offset = %d, for %d bytes to skb.\n",
1837 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001838 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1839 lbq_desc->p.pg_chunk.offset,
1840 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001841 skb->len += length;
1842 skb->data_len += length;
1843 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001844 } else {
1845 /*
1846 * The headers and data are in a single large buffer. We
1847 * copy it to a new skb and let it go. This can happen with
 1848			 * chain it to a new skb and let it go. This can happen with
1849 */
Ron Mercer7c734352009-10-19 03:32:19 +00001850 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001851 skb = netdev_alloc_skb(qdev->ndev, length);
1852 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001853 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1854 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001855 return NULL;
1856 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001857 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001858 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001859 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001860 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001861 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001862 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001863 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1864 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1865 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001866 skb_fill_page_desc(skb, 0,
1867 lbq_desc->p.pg_chunk.page,
1868 lbq_desc->p.pg_chunk.offset,
1869 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001870 skb->len += length;
1871 skb->data_len += length;
1872 skb->truesize += length;
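			/* The entire frame now lives in the page frag,
			 * so nothing is left to chain (length drops to
			 * zero below).
			 */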
1873 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001874 __pskb_pull_tail(skb,
1875 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1876 VLAN_ETH_HLEN : ETH_HLEN);
1877 }
1878 } else {
1879 /*
1880 * The data is in a chain of large buffers
1881 * pointed to by a small buffer. We loop
 1882		 * through and chain them to our small header
1883 * buffer's skb.
1884 * frags: There are 18 max frags and our small
 1885		 * buffer will hold 32 of them. In practice
 1886		 * we'll use at most 3 for our 9000 byte jumbo
 1887		 * frames. If the MTU goes up we could
1888 * eventually be in trouble.
1889 */
Ron Mercer7c734352009-10-19 03:32:19 +00001890 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001891 sbq_desc = ql_get_curr_sbuf(rx_ring);
1892 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001893 dma_unmap_addr(sbq_desc, mapaddr),
1894 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001895 PCI_DMA_FROMDEVICE);
1896 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1897 /*
 1898			 * This is a non-TCP/UDP IP frame, so
1899 * the headers aren't split into a small
1900 * buffer. We have to use the small buffer
1901 * that contains our sg list as our skb to
1902 * send upstairs. Copy the sg list here to
1903 * a local buffer and use it to find the
1904 * pages to chain.
1905 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001906 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1907 "%d bytes of headers & data in chain of large.\n",
1908 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001909 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001910 sbq_desc->p.skb = NULL;
1911 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001912 }
1913 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001914 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1915 size = (length < rx_ring->lbq_buf_size) ? length :
1916 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001917
Joe Perchesae9540f72010-02-09 11:49:52 +00001918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 "Adding page %d to skb for %d bytes.\n",
1920 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001921 skb_fill_page_desc(skb, i,
1922 lbq_desc->p.pg_chunk.page,
1923 lbq_desc->p.pg_chunk.offset,
1924 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001925 skb->len += size;
1926 skb->data_len += size;
1927 skb->truesize += size;
1928 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929 i++;
1930 }
1931 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1932 VLAN_ETH_HLEN : ETH_HLEN);
1933 }
1934 return skb;
1935}
1936
1937/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001938static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001939 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001940 struct ib_mac_iocb_rsp *ib_mac_rsp,
1941 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001942{
1943 struct net_device *ndev = qdev->ndev;
1944 struct sk_buff *skb = NULL;
1945
1946 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1947
1948 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1949 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001950 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001952 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001953 return;
1954 }
1955
Ron Mercera32959c2009-06-09 05:39:27 +00001956 /* Frame error, so drop the packet. */
1957 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001958 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001959 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001960 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001961 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001962 return;
1963 }
Ron Mercerec33a492009-06-09 05:39:28 +00001964
1965 /* The max framesize filter on this chip is set higher than
1966 * MTU since FCoE uses 2k frames.
1967 */
1968 if (skb->len > ndev->mtu + ETH_HLEN) {
1969 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001970 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001971 return;
1972 }
1973
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001974 /* loopback self test for ethtool */
1975 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1976 ql_check_lb_frame(qdev, skb);
1977 dev_kfree_skb_any(skb);
1978 return;
1979 }
1980
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001981 prefetch(skb->data);
1982 skb->dev = ndev;
1983 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001984 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1985 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1986 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1987 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1988 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1989 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1990 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001991 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001992 }
1993 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001994 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1995 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001996 }
Ron Mercerd555f592009-03-09 10:59:19 +00001997
Ron Mercerd555f592009-03-09 10:59:19 +00001998 skb->protocol = eth_type_trans(skb, ndev);
1999 skb->ip_summed = CHECKSUM_NONE;
2000
2001 /* If rx checksum is on, and there are no
2002 * csum or frame errors.
2003 */
2004 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00002005 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2006 /* TCP frame. */
2007 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002008 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2009 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002010 skb->ip_summed = CHECKSUM_UNNECESSARY;
2011 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2012 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2013 /* Unfragmented ipv4 UDP frame. */
2014 struct iphdr *iph = (struct iphdr *) skb->data;
2015 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002016 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002017 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2019				     "UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002020 }
2021 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002022 }
Ron Mercerd555f592009-03-09 10:59:19 +00002023
Ron Mercer885ee392009-11-03 13:49:31 +00002024 rx_ring->rx_packets++;
2025 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002026 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002027 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2028 if (qdev->vlgrp &&
2029 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2030 (vlan_id != 0))
2031 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2032 vlan_id, skb);
2033 else
2034 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002035 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002036 if (qdev->vlgrp &&
2037 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2038 (vlan_id != 0))
2039 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2040 else
2041 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002042 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002043}
2044
Ron Mercer4f848c02010-01-02 10:37:43 +00002045/* Process an inbound completion from an rx ring. */
2046static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047 struct rx_ring *rx_ring,
2048 struct ib_mac_iocb_rsp *ib_mac_rsp)
2049{
2050 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2051 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2052 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2053 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2054
2055 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2056
2057 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2058 /* The data and headers are split into
2059 * separate buffers.
2060 */
2061 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2062 vlan_id);
2063 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2064 /* The data fit in a single small buffer.
2065 * Allocate a new skb, copy the data and
2066 * return the buffer to the free pool.
2067 */
2068 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2069 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002070 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2071 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2072 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2073 /* TCP packet in a page chunk that's been checksummed.
2074 * Tack it on to our GRO skb and let it go.
2075 */
2076 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2077 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002078 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2079 /* Non-TCP packet in a page chunk. Allocate an
2080 * skb, tack it on frags, and send it up.
2081 */
2082 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2083 length, vlan_id);
2084 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002085 /* Non-TCP/UDP large frames that span multiple buffers
 2086		 * can be processed correctly by the split frame logic.
2087 */
2088 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2089 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002090 }
2091
2092 return (unsigned long)length;
2093}
2094
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002095/* Process an outbound completion from an rx ring. */
2096static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2097 struct ob_mac_iocb_rsp *mac_rsp)
2098{
2099 struct tx_ring *tx_ring;
2100 struct tx_ring_desc *tx_ring_desc;
2101
2102 QL_DUMP_OB_MAC_RSP(mac_rsp);
2103 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2104 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2105 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002106 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2107 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002108 dev_kfree_skb(tx_ring_desc->skb);
2109 tx_ring_desc->skb = NULL;
2110
2111 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2112 OB_MAC_IOCB_RSP_S |
2113 OB_MAC_IOCB_RSP_L |
2114 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2115 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002116 netif_warn(qdev, tx_done, qdev->ndev,
2117 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002118 }
2119 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002120 netif_warn(qdev, tx_done, qdev->ndev,
2121 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 }
2123 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002124 netif_warn(qdev, tx_done, qdev->ndev,
2125 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002126 }
2127 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002128 netif_warn(qdev, tx_done, qdev->ndev,
2129 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002130 }
2131 }
2132 atomic_inc(&tx_ring->tx_count);
2133}
2134
2135/* Fire up a handler to reset the MPI processor. */
2136void ql_queue_fw_error(struct ql_adapter *qdev)
2137{
Ron Mercer6a473302009-07-02 06:06:12 +00002138 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002139 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2140}
2141
2142void ql_queue_asic_error(struct ql_adapter *qdev)
2143{
Ron Mercer6a473302009-07-02 06:06:12 +00002144 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002145 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002146 /* Clear adapter up bit to signal the recovery
2147 * process that it shouldn't kill the reset worker
2148 * thread
2149 */
2150 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002151 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2152}
2153
2154static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2155 struct ib_ae_iocb_rsp *ib_ae_rsp)
2156{
2157 switch (ib_ae_rsp->event) {
2158 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002159 netif_err(qdev, rx_err, qdev->ndev,
2160 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002161 ql_queue_fw_error(qdev);
2162 return;
2163
2164 case CAM_LOOKUP_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002165 netif_err(qdev, link, qdev->ndev,
2166 "Multiple CAM hits lookup occurred.\n");
2167 netif_err(qdev, drv, qdev->ndev,
2168 "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002169 ql_queue_asic_error(qdev);
2170 return;
2171
2172 case SOFT_ECC_ERROR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002173 netif_err(qdev, rx_err, qdev->ndev,
2174 "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002175 ql_queue_asic_error(qdev);
2176 break;
2177
2178 case PCI_ERR_ANON_BUF_RD:
Joe Perchesae9540f72010-02-09 11:49:52 +00002179 netif_err(qdev, rx_err, qdev->ndev,
2180 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2181 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002182 ql_queue_asic_error(qdev);
2183 break;
2184
2185 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002186 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2187 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002188 ql_queue_asic_error(qdev);
2189 break;
2190 }
2191}
2192
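/* Drain TX-completion IOCBs from an outbound completion ring, then wake
 * the matching TX queue if it was stopped and is now at least 25% free.
 * Returns the number of completions processed.
 */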
2193static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2194{
2195 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002196 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002197 struct ob_mac_iocb_rsp *net_rsp = NULL;
2198 int count = 0;
2199
Ron Mercer1e213302009-03-09 10:59:21 +00002200 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002201 /* While there are entries in the completion queue. */
2202 while (prod != rx_ring->cnsmr_idx) {
2203
Joe Perchesae9540f72010-02-09 11:49:52 +00002204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2205 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
 2206			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002207
2208 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2209 rmb();
2210 switch (net_rsp->opcode) {
2211
2212 case OPCODE_OB_MAC_TSO_IOCB:
2213 case OPCODE_OB_MAC_IOCB:
2214 ql_process_mac_tx_intr(qdev, net_rsp);
2215 break;
2216 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002217 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2218 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2219 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002220 }
2221 count++;
2222 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002224 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002225 if (!net_rsp)
2226 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002227 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002228 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002229 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002230 if (atomic_read(&tx_ring->queue_stopped) &&
2231 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2232 /*
2233 * The queue got stopped because the tx_ring was full.
2234 * Wake it up, because it's now at least 25% empty.
2235 */
Ron Mercer1e213302009-03-09 10:59:21 +00002236 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002237 }
2238
2239 return count;
2240}
2241
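/* Service up to "budget" inbound completions (MAC RX frames and async
 * events), replenish the small/large buffer queues, and advance the
 * consumer index register.
 */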
2242static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2243{
2244 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002245 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002246 struct ql_net_rsp_iocb *net_rsp;
2247 int count = 0;
2248
2249 /* While there are entries in the completion queue. */
2250 while (prod != rx_ring->cnsmr_idx) {
2251
Joe Perchesae9540f72010-02-09 11:49:52 +00002252 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2253 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
 2254			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002255
2256 net_rsp = rx_ring->curr_entry;
2257 rmb();
2258 switch (net_rsp->opcode) {
2259 case OPCODE_IB_MAC_IOCB:
2260 ql_process_mac_rx_intr(qdev, rx_ring,
2261 (struct ib_mac_iocb_rsp *)
2262 net_rsp);
2263 break;
2264
2265 case OPCODE_IB_AE_IOCB:
2266 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2267 net_rsp);
2268 break;
2269 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002270 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2271 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2272 net_rsp->opcode);
2273 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002274 }
2275 count++;
2276 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002277 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002278 if (count == budget)
2279 break;
2280 }
2281 ql_update_buffer_queues(qdev, rx_ring);
2282 ql_write_cq_idx(rx_ring);
2283 return count;
2284}
2285
2286static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2287{
2288 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2289 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002290 struct rx_ring *trx_ring;
2291 int i, work_done = 0;
2292 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002293
Joe Perchesae9540f72010-02-09 11:49:52 +00002294 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2295 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002296
Ron Mercer39aa8162009-08-27 11:02:11 +00002297 /* Service the TX rings first. They start
2298 * right after the RSS rings. */
2299 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2300 trx_ring = &qdev->rx_ring[i];
2301 /* If this TX completion ring belongs to this vector and
2302 * it's not empty then service it.
2303 */
2304 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2305 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2306 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002307 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2308 "%s: Servicing TX completion ring %d.\n",
2309 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002310 ql_clean_outbound_rx_ring(trx_ring);
2311 }
2312 }
2313
2314 /*
2315 * Now service the RSS ring if it's active.
2316 */
2317 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2318 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002319 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2320 "%s: Servicing RX completion ring %d.\n",
2321 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002322 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2323 }
2324
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002325 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002326 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002327 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2328 }
2329 return work_done;
2330}
2331
Ron Mercer01e6b952009-10-30 12:13:34 +00002332static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002333{
2334 struct ql_adapter *qdev = netdev_priv(ndev);
2335
2336 qdev->vlgrp = grp;
2337 if (grp) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002338 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2339 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002340 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2341 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2342 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00002343 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2344 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002345 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2346 }
2347}
2348
Ron Mercer01e6b952009-10-30 12:13:34 +00002349static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002350{
2351 struct ql_adapter *qdev = netdev_priv(ndev);
2352 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002353 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002354
Ron Mercercc288f52009-02-23 10:42:14 +00002355 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2356 if (status)
2357 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002358 if (ql_set_mac_addr_reg
2359 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002360 netif_err(qdev, ifup, qdev->ndev,
2361 "Failed to init vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002362 }
Ron Mercercc288f52009-02-23 10:42:14 +00002363 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002364}
2365
Ron Mercer01e6b952009-10-30 12:13:34 +00002366static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002367{
2368 struct ql_adapter *qdev = netdev_priv(ndev);
2369 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002370 int status;
2371
2372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373 if (status)
2374 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002375
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002376 if (ql_set_mac_addr_reg
2377 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002378 netif_err(qdev, ifup, qdev->ndev,
2379 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002380 }
Ron Mercercc288f52009-02-23 10:42:14 +00002381 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002382
2383}
2384
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002385/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2386static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2387{
2388 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002389 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002390 return IRQ_HANDLED;
2391}
2392
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002393/* This handles a fatal error, MPI activity, and the default
2394 * rx_ring in an MSI-X multiple vector environment.
2395 * In MSI/Legacy environment it also process the rest of
2396 * the rx_rings.
2397 */
2398static irqreturn_t qlge_isr(int irq, void *dev_id)
2399{
2400 struct rx_ring *rx_ring = dev_id;
2401 struct ql_adapter *qdev = rx_ring->qdev;
2402 struct intr_context *intr_context = &qdev->intr_context[0];
2403 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002404 int work_done = 0;
2405
Ron Mercerbb0d2152008-10-20 10:30:26 -07002406 spin_lock(&qdev->hw_lock);
2407 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002408 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2409 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002410 spin_unlock(&qdev->hw_lock);
2411 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002412 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002413 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002414
Ron Mercerbb0d2152008-10-20 10:30:26 -07002415 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002416
2417 /*
2418 * Check for fatal error.
2419 */
2420 if (var & STS_FE) {
2421 ql_queue_asic_error(qdev);
Joe Perchesae9540f72010-02-09 11:49:52 +00002422 netif_err(qdev, intr, qdev->ndev,
2423 "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002424 var = ql_read32(qdev, ERR_STS);
Joe Perchesae9540f72010-02-09 11:49:52 +00002425 netif_err(qdev, intr, qdev->ndev,
2426 "Resetting chip. Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002427 return IRQ_HANDLED;
2428 }
2429
2430 /*
2431 * Check MPI processor activity.
2432 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002433 if ((var & STS_PI) &&
2434 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002435 /*
2436 * We've got an async event or mailbox completion.
2437 * Handle it and clear the source of the interrupt.
2438 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002439 netif_err(qdev, intr, qdev->ndev,
2440 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002441 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002442 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2443 queue_delayed_work_on(smp_processor_id(),
2444 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002445 work_done++;
2446 }
2447
2448 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002449 * Get the bit-mask that shows the active queues for this
2450 * pass. Compare it to the queues that this irq services
2451 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002452 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002453 var = ql_read32(qdev, ISR1);
2454 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002455 netif_info(qdev, intr, qdev->ndev,
2456 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002457 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002458 napi_schedule(&rx_ring->napi);
2459 work_done++;
2460 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002461 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002462 return work_done ? IRQ_HANDLED : IRQ_NONE;
2463}
2464
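/* Set up the TSO fields of the outbound IOCB when the skb is GSO.
 * Returns 1 if TSO was programmed, 0 if the frame does not need it,
 * or a negative errno if un-cloning the skb header fails.
 */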
2465static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2466{
2467
2468 if (skb_is_gso(skb)) {
2469 int err;
2470 if (skb_header_cloned(skb)) {
2471 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2472 if (err)
2473 return err;
2474 }
2475
2476 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2477 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2478 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2479 mac_iocb_ptr->total_hdrs_len =
2480 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2481 mac_iocb_ptr->net_trans_offset =
2482 cpu_to_le16(skb_network_offset(skb) |
2483 skb_transport_offset(skb)
2484 << OB_MAC_TRANSPORT_HDR_SHIFT);
2485 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2486 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2487 if (likely(skb->protocol == htons(ETH_P_IP))) {
2488 struct iphdr *iph = ip_hdr(skb);
2489 iph->check = 0;
2490 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2491 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2492 iph->daddr, 0,
2493 IPPROTO_TCP,
2494 0);
2495 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2496 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2497 tcp_hdr(skb)->check =
2498 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2499 &ipv6_hdr(skb)->daddr,
2500 0, IPPROTO_TCP, 0);
2501 }
2502 return 1;
2503 }
2504 return 0;
2505}
2506
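/* Program TX checksum offload for a non-GSO TCP or UDP packet.  The
 * transport checksum field is seeded with the pseudo-header sum
 * (~csum_tcpudp_magic) and the chip folds in the payload checksum.
 */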
2507static void ql_hw_csum_setup(struct sk_buff *skb,
2508 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2509{
2510 int len;
2511 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002512 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002513 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2515 mac_iocb_ptr->net_trans_offset =
2516 cpu_to_le16(skb_network_offset(skb) |
2517 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2518
2519 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2520 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2521 if (likely(iph->protocol == IPPROTO_TCP)) {
2522 check = &(tcp_hdr(skb)->check);
2523 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2524 mac_iocb_ptr->total_hdrs_len =
2525 cpu_to_le16(skb_transport_offset(skb) +
2526 (tcp_hdr(skb)->doff << 2));
2527 } else {
2528 check = &(udp_hdr(skb)->check);
2529 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2530 mac_iocb_ptr->total_hdrs_len =
2531 cpu_to_le16(skb_transport_offset(skb) +
2532 sizeof(struct udphdr));
2533 }
2534 *check = ~csum_tcpudp_magic(iph->saddr,
2535 iph->daddr, len, iph->protocol, 0);
2536}
2537
Stephen Hemminger613573252009-08-31 19:50:58 +00002538static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002539{
2540 struct tx_ring_desc *tx_ring_desc;
2541 struct ob_mac_iocb_req *mac_iocb_ptr;
2542 struct ql_adapter *qdev = netdev_priv(ndev);
2543 int tso;
2544 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002545 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002546
2547 tx_ring = &qdev->tx_ring[tx_ring_idx];
2548
Ron Mercer74c50b42009-03-09 10:59:27 +00002549 if (skb_padto(skb, ETH_ZLEN))
2550 return NETDEV_TX_OK;
2551
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002552 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002553 netif_info(qdev, tx_queued, qdev->ndev,
 2554			   "%s: shutting down tx queue %d due to lack of resources.\n",
2555 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002556 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002558 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002559 return NETDEV_TX_BUSY;
2560 }
2561 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2562 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002563 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002564
2565 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2566 mac_iocb_ptr->tid = tx_ring_desc->index;
2567 /* We use the upper 32-bits to store the tx queue for this IO.
2568 * When we get the completion we can use it to establish the context.
2569 */
2570 mac_iocb_ptr->txq_idx = tx_ring_idx;
2571 tx_ring_desc->skb = skb;
2572
2573 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2574
2575 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002576 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2577 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002578 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2579 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2580 }
2581 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2582 if (tso < 0) {
2583 dev_kfree_skb_any(skb);
2584 return NETDEV_TX_OK;
2585 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2586 ql_hw_csum_setup(skb,
2587 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2588 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002589 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2590 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002591 netif_err(qdev, tx_queued, qdev->ndev,
2592 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002593 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002594 return NETDEV_TX_BUSY;
2595 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002596 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2597 tx_ring->prod_idx++;
2598 if (tx_ring->prod_idx == tx_ring->wq_len)
2599 tx_ring->prod_idx = 0;
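	/* Make sure the IOCB contents are visible in memory before the
	 * doorbell write below tells the chip to fetch it.
	 */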
2600 wmb();
2601
2602 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002603 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2604 "tx queued, slot %d, len %d\n",
2605 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002606
2607 atomic_dec(&tx_ring->tx_count);
2608 return NETDEV_TX_OK;
2609}
2610
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002611
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002612static void ql_free_shadow_space(struct ql_adapter *qdev)
2613{
2614 if (qdev->rx_ring_shadow_reg_area) {
2615 pci_free_consistent(qdev->pdev,
2616 PAGE_SIZE,
2617 qdev->rx_ring_shadow_reg_area,
2618 qdev->rx_ring_shadow_reg_dma);
2619 qdev->rx_ring_shadow_reg_area = NULL;
2620 }
2621 if (qdev->tx_ring_shadow_reg_area) {
2622 pci_free_consistent(qdev->pdev,
2623 PAGE_SIZE,
2624 qdev->tx_ring_shadow_reg_area,
2625 qdev->tx_ring_shadow_reg_dma);
2626 qdev->tx_ring_shadow_reg_area = NULL;
2627 }
2628}
2629
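/* The shadow register areas hold per-ring index values that the chip
 * DMAs into host memory, letting the fast path (ql_read_sh_reg()) poll
 * producer indexes without a PCI register read.
 */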
2630static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2631{
2632 qdev->rx_ring_shadow_reg_area =
2633 pci_alloc_consistent(qdev->pdev,
2634 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2635 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002636 netif_err(qdev, ifup, qdev->ndev,
2637 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002638 return -ENOMEM;
2639 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002640 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002641 qdev->tx_ring_shadow_reg_area =
2642 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2643 &qdev->tx_ring_shadow_reg_dma);
2644 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002645 netif_err(qdev, ifup, qdev->ndev,
2646 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002647 goto err_wqp_sh_area;
2648 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002649 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002650 return 0;
2651
2652err_wqp_sh_area:
2653 pci_free_consistent(qdev->pdev,
2654 PAGE_SIZE,
2655 qdev->rx_ring_shadow_reg_area,
2656 qdev->rx_ring_shadow_reg_dma);
2657 return -ENOMEM;
2658}
2659
2660static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2661{
2662 struct tx_ring_desc *tx_ring_desc;
2663 int i;
2664 struct ob_mac_iocb_req *mac_iocb_ptr;
2665
2666 mac_iocb_ptr = tx_ring->wq_base;
2667 tx_ring_desc = tx_ring->q;
2668 for (i = 0; i < tx_ring->wq_len; i++) {
2669 tx_ring_desc->index = i;
2670 tx_ring_desc->skb = NULL;
2671 tx_ring_desc->queue_entry = mac_iocb_ptr;
2672 mac_iocb_ptr++;
2673 tx_ring_desc++;
2674 }
2675 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2676 atomic_set(&tx_ring->queue_stopped, 0);
2677}
2678
2679static void ql_free_tx_resources(struct ql_adapter *qdev,
2680 struct tx_ring *tx_ring)
2681{
2682 if (tx_ring->wq_base) {
2683 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2684 tx_ring->wq_base, tx_ring->wq_base_dma);
2685 tx_ring->wq_base = NULL;
2686 }
2687 kfree(tx_ring->q);
2688 tx_ring->q = NULL;
2689}
2690
2691static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2692 struct tx_ring *tx_ring)
2693{
2694 tx_ring->wq_base =
2695 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2696 &tx_ring->wq_base_dma);
2697
Joe Perches8e95a202009-12-03 07:58:21 +00002698 if ((tx_ring->wq_base == NULL) ||
2699 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002700 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002701 return -ENOMEM;
2702 }
2703 tx_ring->q =
2704 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2705 if (tx_ring->q == NULL)
2706 goto err;
2707
2708 return 0;
2709err:
2710 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2711 tx_ring->wq_base, tx_ring->wq_base_dma);
2712 return -ENOMEM;
2713}
2714
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002715static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002716{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002717 struct bq_desc *lbq_desc;
2718
Ron Mercer7c734352009-10-19 03:32:19 +00002719 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002720
Ron Mercer7c734352009-10-19 03:32:19 +00002721 curr_idx = rx_ring->lbq_curr_idx;
2722 clean_idx = rx_ring->lbq_clean_idx;
2723 while (curr_idx != clean_idx) {
2724 lbq_desc = &rx_ring->lbq[curr_idx];
2725
2726 if (lbq_desc->p.pg_chunk.last_flag) {
2727 pci_unmap_page(qdev->pdev,
2728 lbq_desc->p.pg_chunk.map,
2729 ql_lbq_block_size(qdev),
2730 PCI_DMA_FROMDEVICE);
2731 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002732 }
Ron Mercer7c734352009-10-19 03:32:19 +00002733
2734 put_page(lbq_desc->p.pg_chunk.page);
2735 lbq_desc->p.pg_chunk.page = NULL;
2736
2737 if (++curr_idx == rx_ring->lbq_len)
2738 curr_idx = 0;
2739
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002740 }
2741}
2742
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002743static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002744{
2745 int i;
2746 struct bq_desc *sbq_desc;
2747
2748 for (i = 0; i < rx_ring->sbq_len; i++) {
2749 sbq_desc = &rx_ring->sbq[i];
2750 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002751 netif_err(qdev, ifup, qdev->ndev,
2752 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002753 return;
2754 }
2755 if (sbq_desc->p.skb) {
2756 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002757 dma_unmap_addr(sbq_desc, mapaddr),
2758 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002759 PCI_DMA_FROMDEVICE);
2760 dev_kfree_skb(sbq_desc->p.skb);
2761 sbq_desc->p.skb = NULL;
2762 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002763 }
2764}
2765
Ron Mercer4545a3f2009-02-23 10:42:17 +00002766/* Free all large and small rx buffers associated
2767 * with the completion queues for this device.
2768 */
2769static void ql_free_rx_buffers(struct ql_adapter *qdev)
2770{
2771 int i;
2772 struct rx_ring *rx_ring;
2773
2774 for (i = 0; i < qdev->rx_ring_count; i++) {
2775 rx_ring = &qdev->rx_ring[i];
2776 if (rx_ring->lbq)
2777 ql_free_lbq_buffers(qdev, rx_ring);
2778 if (rx_ring->sbq)
2779 ql_free_sbq_buffers(qdev, rx_ring);
2780 }
2781}
2782
2783static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2784{
2785 struct rx_ring *rx_ring;
2786 int i;
2787
2788 for (i = 0; i < qdev->rx_ring_count; i++) {
2789 rx_ring = &qdev->rx_ring[i];
2790 if (rx_ring->type != TX_Q)
2791 ql_update_buffer_queues(qdev, rx_ring);
2792 }
2793}
2794
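/* Point each large-buffer descriptor at its slot in the DMA'd address
 * ring; the page chunks themselves are attached later by
 * ql_update_buffer_queues().
 */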
2795static void ql_init_lbq_ring(struct ql_adapter *qdev,
2796 struct rx_ring *rx_ring)
2797{
2798 int i;
2799 struct bq_desc *lbq_desc;
2800 __le64 *bq = rx_ring->lbq_base;
2801
2802 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2803 for (i = 0; i < rx_ring->lbq_len; i++) {
2804 lbq_desc = &rx_ring->lbq[i];
2805 memset(lbq_desc, 0, sizeof(*lbq_desc));
2806 lbq_desc->index = i;
2807 lbq_desc->addr = bq;
2808 bq++;
2809 }
2810}
2811
2812static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002813 struct rx_ring *rx_ring)
2814{
2815 int i;
2816 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002817 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002818
Ron Mercer4545a3f2009-02-23 10:42:17 +00002819 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002820 for (i = 0; i < rx_ring->sbq_len; i++) {
2821 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002822 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002823 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002824 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002825 bq++;
2826 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002827}
2828
2829static void ql_free_rx_resources(struct ql_adapter *qdev,
2830 struct rx_ring *rx_ring)
2831{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002832 /* Free the small buffer queue. */
2833 if (rx_ring->sbq_base) {
2834 pci_free_consistent(qdev->pdev,
2835 rx_ring->sbq_size,
2836 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2837 rx_ring->sbq_base = NULL;
2838 }
2839
2840 /* Free the small buffer queue control blocks. */
2841 kfree(rx_ring->sbq);
2842 rx_ring->sbq = NULL;
2843
2844 /* Free the large buffer queue. */
2845 if (rx_ring->lbq_base) {
2846 pci_free_consistent(qdev->pdev,
2847 rx_ring->lbq_size,
2848 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2849 rx_ring->lbq_base = NULL;
2850 }
2851
2852 /* Free the large buffer queue control blocks. */
2853 kfree(rx_ring->lbq);
2854 rx_ring->lbq = NULL;
2855
2856 /* Free the rx queue. */
2857 if (rx_ring->cq_base) {
2858 pci_free_consistent(qdev->pdev,
2859 rx_ring->cq_size,
2860 rx_ring->cq_base, rx_ring->cq_base_dma);
2861 rx_ring->cq_base = NULL;
2862 }
2863}
2864
 2865/* Allocate queues and buffers for this completion queue based
2866 * on the values in the parameter structure. */
2867static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2868 struct rx_ring *rx_ring)
2869{
2870
2871 /*
2872 * Allocate the completion queue for this rx_ring.
2873 */
2874 rx_ring->cq_base =
2875 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2876 &rx_ring->cq_base_dma);
2877
2878 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002879 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002880 return -ENOMEM;
2881 }
2882
2883 if (rx_ring->sbq_len) {
2884 /*
2885 * Allocate small buffer queue.
2886 */
2887 rx_ring->sbq_base =
2888 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2889 &rx_ring->sbq_base_dma);
2890
2891 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002892 netif_err(qdev, ifup, qdev->ndev,
2893 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002894 goto err_mem;
2895 }
2896
2897 /*
2898 * Allocate small buffer queue control blocks.
2899 */
2900 rx_ring->sbq =
2901 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2902 GFP_KERNEL);
2903 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002904 netif_err(qdev, ifup, qdev->ndev,
2905 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002906 goto err_mem;
2907 }
2908
Ron Mercer4545a3f2009-02-23 10:42:17 +00002909 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002910 }
2911
2912 if (rx_ring->lbq_len) {
2913 /*
2914 * Allocate large buffer queue.
2915 */
2916 rx_ring->lbq_base =
2917 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2918 &rx_ring->lbq_base_dma);
2919
2920 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002921 netif_err(qdev, ifup, qdev->ndev,
2922 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002923 goto err_mem;
2924 }
2925 /*
2926 * Allocate large buffer queue control blocks.
2927 */
2928 rx_ring->lbq =
2929 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2930 GFP_KERNEL);
2931 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002932 netif_err(qdev, ifup, qdev->ndev,
2933 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002934 goto err_mem;
2935 }
2936
Ron Mercer4545a3f2009-02-23 10:42:17 +00002937 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002938 }
2939
2940 return 0;
2941
2942err_mem:
2943 ql_free_rx_resources(qdev, rx_ring);
2944 return -ENOMEM;
2945}
2946
2947static void ql_tx_ring_clean(struct ql_adapter *qdev)
2948{
2949 struct tx_ring *tx_ring;
2950 struct tx_ring_desc *tx_ring_desc;
2951 int i, j;
2952
2953 /*
2954 * Loop through all queues and free
2955 * any resources.
2956 */
2957 for (j = 0; j < qdev->tx_ring_count; j++) {
2958 tx_ring = &qdev->tx_ring[j];
2959 for (i = 0; i < tx_ring->wq_len; i++) {
2960 tx_ring_desc = &tx_ring->q[i];
2961 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002962 netif_err(qdev, ifdown, qdev->ndev,
2963 "Freeing lost SKB %p, from queue %d, index %d.\n",
2964 tx_ring_desc->skb, j,
2965 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002966 ql_unmap_send(qdev, tx_ring_desc,
2967 tx_ring_desc->map_cnt);
2968 dev_kfree_skb(tx_ring_desc->skb);
2969 tx_ring_desc->skb = NULL;
2970 }
2971 }
2972 }
2973}
2974
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002975static void ql_free_mem_resources(struct ql_adapter *qdev)
2976{
2977 int i;
2978
2979 for (i = 0; i < qdev->tx_ring_count; i++)
2980 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2981 for (i = 0; i < qdev->rx_ring_count; i++)
2982 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2983 ql_free_shadow_space(qdev);
2984}
2985
2986static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2987{
2988 int i;
2989
2990 /* Allocate space for our shadow registers and such. */
2991 if (ql_alloc_shadow_space(qdev))
2992 return -ENOMEM;
2993
2994 for (i = 0; i < qdev->rx_ring_count; i++) {
2995 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002996 netif_err(qdev, ifup, qdev->ndev,
2997 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002998 goto err_mem;
2999 }
3000 }
3001 /* Allocate tx queue resources */
3002 for (i = 0; i < qdev->tx_ring_count; i++) {
3003 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003004 netif_err(qdev, ifup, qdev->ndev,
3005 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003006 goto err_mem;
3007 }
3008 }
3009 return 0;
3010
3011err_mem:
3012 ql_free_mem_resources(qdev);
3013 return -ENOMEM;
3014}
3015
3016/* Set up the rx ring control block and pass it to the chip.
3017 * The control block is defined as
3018 * "Completion Queue Initialization Control Block", or cqicb.
3019 */
3020static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3021{
3022 struct cqicb *cqicb = &rx_ring->cqicb;
3023 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003024 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003025 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003026 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003027 void __iomem *doorbell_area =
3028 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3029 int err = 0;
3030 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003031 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003032 __le64 *base_indirect_ptr;
3033 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003034
3035 /* Set up the shadow registers for this ring. */
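	/* The per-ring shadow area is laid out as an 8-byte completion
	 * producer index, followed by the large buffer queue indirection
	 * page list, then the small buffer queue indirection page list.
	 */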
3036 rx_ring->prod_idx_sh_reg = shadow_reg;
3037 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003038 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003039 shadow_reg += sizeof(u64);
3040 shadow_reg_dma += sizeof(u64);
3041 rx_ring->lbq_base_indirect = shadow_reg;
3042 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003043 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3044 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003045 rx_ring->sbq_base_indirect = shadow_reg;
3046 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3047
3048 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003049 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003050 rx_ring->cnsmr_idx = 0;
3051 rx_ring->curr_entry = rx_ring->cq_base;
3052
3053 /* PCI doorbell mem area + 0x04 for valid register */
3054 rx_ring->valid_db_reg = doorbell_area + 0x04;
3055
3056 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003057 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003058
3059 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003060 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003061
3062 memset((void *)cqicb, 0, sizeof(struct cqicb));
3063 cqicb->msix_vect = rx_ring->irq;
3064
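	/* A queue length of 65536 does not fit in the 16-bit length
	 * field; the code encodes it as zero instead.
	 */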
Ron Mercer459caf52009-01-04 17:08:11 -08003065 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3066 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003067
Ron Mercer97345522009-01-09 11:31:50 +00003068 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003069
Ron Mercer97345522009-01-09 11:31:50 +00003070 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003071
3072 /*
3073 * Set up the control block load flags.
3074 */
3075 cqicb->flags = FLAGS_LC | /* Load queue base address */
3076 FLAGS_LV | /* Load MSI-X vector */
3077 FLAGS_LI; /* Load irq delay values */
3078 if (rx_ring->lbq_len) {
3079 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003080 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003081 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3082 page_entries = 0;
3083 do {
3084 *base_indirect_ptr = cpu_to_le64(tmp);
3085 tmp += DB_PAGE_SIZE;
3086 base_indirect_ptr++;
3087 page_entries++;
3088 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003089 cqicb->lbq_addr =
3090 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003091 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3092 (u16) rx_ring->lbq_buf_size;
3093 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3094 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3095 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003096 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003097 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003098 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003099 rx_ring->lbq_clean_idx = 0;
3100 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003101 }
3102 if (rx_ring->sbq_len) {
3103 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003104 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003105 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3106 page_entries = 0;
3107 do {
3108 *base_indirect_ptr = cpu_to_le64(tmp);
3109 tmp += DB_PAGE_SIZE;
3110 base_indirect_ptr++;
3111 page_entries++;
3112 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003113 cqicb->sbq_addr =
3114 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003115 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003116 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003117 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3118 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003119 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003120 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003121 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003122 rx_ring->sbq_clean_idx = 0;
3123 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003124 }
3125 switch (rx_ring->type) {
3126 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003127 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3128 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3129 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003130 case RX_Q:
3131 /* Inbound completion handling rx_rings run in
3132 * separate NAPI contexts.
3133 */
3134 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3135 64);
3136 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3137 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3138 break;
3139 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003140 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3141 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003143 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003145 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3146 CFG_LCQ, rx_ring->cq_id);
3147 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003148 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003149 return err;
3150 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003151 return err;
3152}
3153
3154static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3155{
3156 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3157 void __iomem *doorbell_area =
3158 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3159 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3160 (tx_ring->wq_id * sizeof(u64));
3161 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3162 (tx_ring->wq_id * sizeof(u64));
3163 int err = 0;
3164
3165 /*
3166 * Assign doorbell registers for this tx_ring.
3167 */
3168 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003169 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003170 tx_ring->prod_idx = 0;
3171 /* TX PCI doorbell mem area + 0x04 */
3172 tx_ring->valid_db_reg = doorbell_area + 0x04;
3173
3174 /*
3175 * Assign shadow registers for this tx_ring.
3176 */
3177 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3178 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3179
3180 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3181 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3182 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3183 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3184 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003185 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003186
Ron Mercer97345522009-01-09 11:31:50 +00003187 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188
3189 ql_init_tx_ring(qdev, tx_ring);
3190
Ron Mercere3324712009-07-02 06:06:13 +00003191 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003192 (u16) tx_ring->wq_id);
3193 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003194 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003195 return err;
3196 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003197 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3198 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003199 return err;
3200}
3201
3202static void ql_disable_msix(struct ql_adapter *qdev)
3203{
3204 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3205 pci_disable_msix(qdev->pdev);
3206 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3207 kfree(qdev->msi_x_entry);
3208 qdev->msi_x_entry = NULL;
3209 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3210 pci_disable_msi(qdev->pdev);
3211 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3212 }
3213}
3214
Ron Mercera4ab6132009-08-27 11:02:10 +00003215/* We start by trying to get the number of vectors
3216 * stored in qdev->intr_count. If we don't get that
3217 * many then we reduce the count and try again.
3218 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003219static void ql_enable_msix(struct ql_adapter *qdev)
3220{
Ron Mercera4ab6132009-08-27 11:02:10 +00003221 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003222
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003223 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003224 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003225 /* Try to alloc space for the msix struct,
3226 * if it fails then go to MSI/legacy.
3227 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003228 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003229 sizeof(struct msix_entry),
3230 GFP_KERNEL);
3231 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003232 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003233 goto msi;
3234 }
3235
Ron Mercera4ab6132009-08-27 11:02:10 +00003236 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003237 qdev->msi_x_entry[i].entry = i;
3238
Ron Mercera4ab6132009-08-27 11:02:10 +00003239 /* Loop to get our vectors. We start with
3240 * what we want and settle for what we get.
3241 */
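		/* Note: pci_enable_msix() returns 0 on success, a positive
		 * count of vectors that are available when the full request
		 * cannot be met (we then retry with that count), or a
		 * negative errno on failure.
		 */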
3242 do {
3243 err = pci_enable_msix(qdev->pdev,
3244 qdev->msi_x_entry, qdev->intr_count);
3245 if (err > 0)
3246 qdev->intr_count = err;
3247 } while (err > 0);
3248
3249 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003250 kfree(qdev->msi_x_entry);
3251 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003252 netif_warn(qdev, ifup, qdev->ndev,
3253 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003254 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003255 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003256 } else if (err == 0) {
3257 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003258 netif_info(qdev, ifup, qdev->ndev,
3259 "MSI-X Enabled, got %d vectors.\n",
3260 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003261 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003262 }
3263 }
3264msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003265 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003266 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003267 if (!pci_enable_msi(qdev->pdev)) {
3268 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003269 netif_info(qdev, ifup, qdev->ndev,
3270 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003271 return;
3272 }
3273 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003274 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003275 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3276 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003277}
3278
Ron Mercer39aa8162009-08-27 11:02:11 +00003279/* Each vector services 1 RSS ring and 1 or more
3280 * TX completion rings. This function loops through
3281 * the TX completion rings and assigns the vector that
3282 * will service it. An example would be if there are
3283 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3284 * This would mean that vector 0 would service RSS ring 0
3285 * and TX completion rings 0,1,2 and 3. Vector 1 would
3286 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3287 */
3288static void ql_set_tx_vect(struct ql_adapter *qdev)
3289{
3290 int i, j, vect;
3291 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3292
3293 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3294		/* Assign irq vectors to the TX completion rx_rings. */
3295 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3296 i < qdev->rx_ring_count; i++) {
3297 if (j == tx_rings_per_vector) {
3298 vect++;
3299 j = 0;
3300 }
3301 qdev->rx_ring[i].irq = vect;
3302 j++;
3303 }
3304 } else {
3305 /* For single vector all rings have an irq
3306 * of zero.
3307 */
3308 for (i = 0; i < qdev->rx_ring_count; i++)
3309 qdev->rx_ring[i].irq = 0;
3310 }
3311}
3312
3313/* Set the interrupt mask for this vector. Each vector
3314 * will service 1 RSS ring and 1 or more TX completion
3315 * rings. This function sets up a bit mask per vector
3316 * that indicates which rings it services.
3317 */
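/* As an illustration (assumed numbers, matching the example above):
 * with 2 MSI-X vectors and 8 TX completion rings, tx_rings_per_vector
 * is 4, so vector 1 sets mask bits for RSS ring cq_id 1 and for TX
 * completion cq_ids 6 through 9 (rings 4 through 7, offset by
 * rss_ring_count).
 */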
3318static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3319{
3320 int j, vect = ctx->intr;
3321 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3322
3323 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3324 /* Add the RSS ring serviced by this vector
3325 * to the mask.
3326 */
3327 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3328 /* Add the TX ring(s) serviced by this vector
3329 * to the mask. */
3330 for (j = 0; j < tx_rings_per_vector; j++) {
3331 ctx->irq_mask |=
3332 (1 << qdev->rx_ring[qdev->rss_ring_count +
3333 (vect * tx_rings_per_vector) + j].cq_id);
3334 }
3335 } else {
3336 /* For single vector we just shift each queue's
3337 * ID into the mask.
3338 */
3339 for (j = 0; j < qdev->rx_ring_count; j++)
3340 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3341 }
3342}
3343
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003344/*
3345 * Here we build the intr_context structures based on
3346 * our rx_ring count and intr vector count.
3347 * The intr_context structure is used to hook each vector
3348 * to possibly different handlers.
3349 */
3350static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3351{
3352 int i = 0;
3353 struct intr_context *intr_context = &qdev->intr_context[0];
3354
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003355 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3356		/* Each rx_ring has its
3357 * own intr_context since we have separate
3358 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003359 */
3360 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3361 qdev->rx_ring[i].irq = i;
3362 intr_context->intr = i;
3363 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003364 /* Set up this vector's bit-mask that indicates
3365 * which queues it services.
3366 */
3367 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003368 /*
3369			 * We set up each vector's enable/disable/read bits so
3370			 * there are no bit/mask calculations in the critical path.
3371 */
3372 intr_context->intr_en_mask =
3373 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3374 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3375 | i;
3376 intr_context->intr_dis_mask =
3377 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3378 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3379 INTR_EN_IHD | i;
3380 intr_context->intr_read_mask =
3381 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3382 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3383 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003384 if (i == 0) {
3385 /* The first vector/queue handles
3386 * broadcast/multicast, fatal errors,
3387			 * and firmware events. This is in addition
3388 * to normal inbound NAPI processing.
3389 */
3390 intr_context->handler = qlge_isr;
3391 sprintf(intr_context->name, "%s-rx-%d",
3392 qdev->ndev->name, i);
3393 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003394 /*
3395 * Inbound queues handle unicast frames only.
3396 */
3397 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003398 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003399 qdev->ndev->name, i);
3400 }
3401 }
3402 } else {
3403 /*
3404 * All rx_rings use the same intr_context since
3405 * there is only one vector.
3406 */
3407 intr_context->intr = 0;
3408 intr_context->qdev = qdev;
3409 /*
3410		 * We set up each vector's enable/disable/read bits so
3411		 * there are no bit/mask calculations in the critical path.
3412 */
3413 intr_context->intr_en_mask =
3414 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3415 intr_context->intr_dis_mask =
3416 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417 INTR_EN_TYPE_DISABLE;
3418 intr_context->intr_read_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3420 /*
3421 * Single interrupt means one handler for all rings.
3422 */
3423 intr_context->handler = qlge_isr;
3424 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003425 /* Set up this vector's bit-mask that indicates
3426 * which queues it services. In this case there is
3427 * a single vector so it will service all RSS and
3428 * TX completion rings.
3429 */
3430 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003431 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003432 /* Tell the TX completion rings which MSIx vector
3433 * they will be using.
3434 */
3435 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003436}
3437
3438static void ql_free_irq(struct ql_adapter *qdev)
3439{
3440 int i;
3441 struct intr_context *intr_context = &qdev->intr_context[0];
3442
3443 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3444 if (intr_context->hooked) {
3445 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3446 free_irq(qdev->msi_x_entry[i].vector,
3447 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003448 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3449 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003450 } else {
3451 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003452 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3453 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003454 }
3455 }
3456 }
3457 ql_disable_msix(qdev);
3458}
3459
3460static int ql_request_irq(struct ql_adapter *qdev)
3461{
3462 int i;
3463 int status = 0;
3464 struct pci_dev *pdev = qdev->pdev;
3465 struct intr_context *intr_context = &qdev->intr_context[0];
3466
3467 ql_resolve_queues_to_irqs(qdev);
3468
3469 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3470 atomic_set(&intr_context->irq_cnt, 0);
3471 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3472 status = request_irq(qdev->msi_x_entry[i].vector,
3473 intr_context->handler,
3474 0,
3475 intr_context->name,
3476 &qdev->rx_ring[i]);
3477 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003478 netif_err(qdev, ifup, qdev->ndev,
3479 "Failed request for MSIX interrupt %d.\n",
3480 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003481 goto err_irq;
3482 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003483 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3484 "Hooked intr %d, queue type %s, with name %s.\n",
3485 i,
3486 qdev->rx_ring[i].type == DEFAULT_Q ?
3487 "DEFAULT_Q" :
3488 qdev->rx_ring[i].type == TX_Q ?
3489 "TX_Q" :
3490 qdev->rx_ring[i].type == RX_Q ?
3491 "RX_Q" : "",
3492 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003493 }
3494 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "trying msi or legacy interrupts.\n");
3497 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3498 "%s: irq = %d.\n", __func__, pdev->irq);
3499 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3500 "%s: context->name = %s.\n", __func__,
3501 intr_context->name);
3502 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503 "%s: dev_id = 0x%p.\n", __func__,
3504 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003505 status =
3506 request_irq(pdev->irq, qlge_isr,
3507 test_bit(QL_MSI_ENABLED,
3508 &qdev->
3509 flags) ? 0 : IRQF_SHARED,
3510 intr_context->name, &qdev->rx_ring[0]);
3511 if (status)
3512 goto err_irq;
3513
Joe Perchesae9540f72010-02-09 11:49:52 +00003514 netif_err(qdev, ifup, qdev->ndev,
3515 "Hooked intr %d, queue type %s, with name %s.\n",
3516 i,
3517 qdev->rx_ring[0].type == DEFAULT_Q ?
3518 "DEFAULT_Q" :
3519 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3520 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3521 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003522 }
3523 intr_context->hooked = 1;
3524 }
3525 return status;
3526err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003527 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003528 ql_free_irq(qdev);
3529 return status;
3530}
3531
3532static int ql_start_rss(struct ql_adapter *qdev)
3533{
Ron Mercer541ae282009-10-08 09:54:37 +00003534 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3535 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3536 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3537 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3538 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3539 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003540 struct ricb *ricb = &qdev->ricb;
3541 int status = 0;
3542 int i;
3543 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3544
Ron Mercere3324712009-07-02 06:06:13 +00003545 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003546
Ron Mercerb2014ff2009-08-27 11:02:09 +00003547 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003548 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003549 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3550 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003551
3552 /*
3553 * Fill out the Indirection Table.
3554 */
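	/* The AND with (rss_ring_count - 1) spreads the 1024 table
	 * entries round-robin across the RSS rings; this assumes the
	 * RSS ring count is a power of two.
	 */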
Ron Mercer541ae282009-10-08 09:54:37 +00003555 for (i = 0; i < 1024; i++)
3556 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003557
Ron Mercer541ae282009-10-08 09:54:37 +00003558 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3559 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003560
Joe Perchesae9540f72010-02-09 11:49:52 +00003561 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003562
Ron Mercere3324712009-07-02 06:06:13 +00003563 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003564 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003565 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003566 return status;
3567 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003568 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3569 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003570 return status;
3571}
3572
Ron Mercera5f59dc2009-07-02 06:06:07 +00003573static int ql_clear_routing_entries(struct ql_adapter *qdev)
3574{
3575 int i, status = 0;
3576
3577 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3578 if (status)
3579 return status;
3580 /* Clear all the entries in the routing table. */
3581 for (i = 0; i < 16; i++) {
3582 status = ql_set_routing_reg(qdev, i, 0, 0);
3583 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003584 netif_err(qdev, ifup, qdev->ndev,
3585 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003586 break;
3587 }
3588 }
3589 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3590 return status;
3591}
3592
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003593/* Initialize the frame-to-queue routing. */
3594static int ql_route_initialize(struct ql_adapter *qdev)
3595{
3596 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003597
3598 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003599 status = ql_clear_routing_entries(qdev);
3600 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003601 return status;
3602
3603 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3604 if (status)
3605 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003606
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003607 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3608 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003609 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003610 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003611 "Failed to init routing register "
3612 "for IP CSUM error packets.\n");
3613 goto exit;
3614 }
3615 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3616 RT_IDX_TU_CSUM_ERR, 1);
3617 if (status) {
3618 netif_err(qdev, ifup, qdev->ndev,
3619 "Failed to init routing register "
3620 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003621 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003622 }
3623 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3624 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003625 netif_err(qdev, ifup, qdev->ndev,
3626 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003627 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003628 }
3629 /* If we have more than one inbound queue, then turn on RSS in the
3630 * routing block.
3631 */
3632 if (qdev->rss_ring_count > 1) {
3633 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3634 RT_IDX_RSS_MATCH, 1);
3635 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003636 netif_err(qdev, ifup, qdev->ndev,
3637 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003638 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003639 }
3640 }
3641
3642 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3643 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003644 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003645 netif_err(qdev, ifup, qdev->ndev,
3646 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003647exit:
3648 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003649 return status;
3650}
3651
Ron Mercer2ee1e272009-03-03 12:10:33 +00003652int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003653{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003654 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003655
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003656	/* Check if the link is up and use that to
 3657	 * determine whether we are setting or clearing
3658 * the MAC address in the CAM.
3659 */
3660 set = ql_read32(qdev, STS);
3661 set &= qdev->port_link_up;
3662 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003663 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003664 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003665 return status;
3666 }
3667
3668 status = ql_route_initialize(qdev);
3669 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003670 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003671
3672 return status;
3673}
3674
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003675static int ql_adapter_initialize(struct ql_adapter *qdev)
3676{
3677 u32 value, mask;
3678 int i;
3679 int status = 0;
3680
3681 /*
3682 * Set up the System register to halt on errors.
3683 */
3684 value = SYS_EFE | SYS_FAE;
3685 mask = value << 16;
3686 ql_write32(qdev, SYS, mask | value);
3687
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003688 /* Set the default queue, and VLAN behavior. */
3689 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3690 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003691 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3692
3693 /* Set the MPI interrupt to enabled. */
3694 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3695
3696 /* Enable the function, set pagesize, enable error checking. */
3697 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003698 FSC_EC | FSC_VM_PAGE_4K;
3699 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003700
3701 /* Set/clear header splitting. */
3702 mask = FSC_VM_PAGESIZE_MASK |
3703 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3704 ql_write32(qdev, FSC, mask | value);
3705
Ron Mercer572c5262010-01-02 10:37:42 +00003706 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003707
Ron Mercera3b71932009-10-08 09:54:38 +00003708 /* Set RX packet routing to use port/pci function on which the
3709	 * packet arrived, in addition to the usual frame routing.
3710 * This is helpful on bonding where both interfaces can have
3711 * the same MAC address.
3712 */
3713 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003714 /* Reroute all packets to our Interface.
3715 * They may have been routed to MPI firmware
3716 * due to WOL.
3717 */
3718 value = ql_read32(qdev, MGMT_RCV_CFG);
3719 value &= ~MGMT_RCV_CFG_RM;
3720 mask = 0xffff0000;
3721
3722 /* Sticky reg needs clearing due to WOL. */
3723 ql_write32(qdev, MGMT_RCV_CFG, mask);
3724 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3725
3726	/* Default WOL is enabled on Mezz cards */
3727 if (qdev->pdev->subsystem_device == 0x0068 ||
3728 qdev->pdev->subsystem_device == 0x0180)
3729 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003730
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003731 /* Start up the rx queues. */
3732 for (i = 0; i < qdev->rx_ring_count; i++) {
3733 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3734 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003735 netif_err(qdev, ifup, qdev->ndev,
3736 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003737 return status;
3738 }
3739 }
3740
3741 /* If there is more than one inbound completion queue
3742 * then download a RICB to configure RSS.
3743 */
3744 if (qdev->rss_ring_count > 1) {
3745 status = ql_start_rss(qdev);
3746 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003747 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003748 return status;
3749 }
3750 }
3751
3752 /* Start up the tx queues. */
3753 for (i = 0; i < qdev->tx_ring_count; i++) {
3754 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3755 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003756 netif_err(qdev, ifup, qdev->ndev,
3757 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003758 return status;
3759 }
3760 }
3761
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003762 /* Initialize the port and set the max framesize. */
3763 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003764 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003765 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003766
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003767 /* Set up the MAC address and frame routing filter. */
3768 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003769 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003770 netif_err(qdev, ifup, qdev->ndev,
3771 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003772 return status;
3773 }
3774
3775 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003776 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003777 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3778 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003779 napi_enable(&qdev->rx_ring[i].napi);
3780 }
3781
3782 return status;
3783}
3784
3785/* Issue soft reset to chip. */
3786static int ql_adapter_reset(struct ql_adapter *qdev)
3787{
3788 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003789 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003790 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003791
Ron Mercera5f59dc2009-07-02 06:06:07 +00003792 /* Clear all the entries in the routing table. */
3793 status = ql_clear_routing_entries(qdev);
3794 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003795 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003796 return status;
3797 }
3798
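	/* Bound the wait for the function reset (RST_FO_FR) bit to
	 * clear: at least one jiffy, or roughly 30 microseconds.
	 */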
3799 end_jiffies = jiffies +
3800 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003801
3802 /* Stop management traffic. */
3803 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3804
3805 /* Wait for the NIC and MGMNT FIFOs to empty. */
3806 ql_wait_fifo_empty(qdev);
3807
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003809
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003810 do {
3811 value = ql_read32(qdev, RST_FO);
3812 if ((value & RST_FO_FR) == 0)
3813 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003814 cpu_relax();
3815 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003816
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003817 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003818 netif_err(qdev, ifdown, qdev->ndev,
3819 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003820 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003821 }
3822
Ron Mercer84087f42009-10-08 09:54:41 +00003823 /* Resume management traffic. */
3824 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003825 return status;
3826}
3827
3828static void ql_display_dev_info(struct net_device *ndev)
3829{
3830 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3831
Joe Perchesae9540f72010-02-09 11:49:52 +00003832 netif_info(qdev, probe, qdev->ndev,
3833 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3834 "XG Roll = %d, XG Rev = %d.\n",
3835 qdev->func,
3836 qdev->port,
3837 qdev->chip_rev_id & 0x0000000f,
3838 qdev->chip_rev_id >> 4 & 0x0000000f,
3839 qdev->chip_rev_id >> 8 & 0x0000000f,
3840 qdev->chip_rev_id >> 12 & 0x0000000f);
3841 netif_info(qdev, probe, qdev->ndev,
3842 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003843}
3844
Ron Mercerbc083ce2009-10-21 11:07:40 +00003845int ql_wol(struct ql_adapter *qdev)
3846{
3847 int status = 0;
3848 u32 wol = MB_WOL_DISABLE;
3849
3850 /* The CAM is still intact after a reset, but if we
3851 * are doing WOL, then we may need to program the
3852 * routing regs. We would also need to issue the mailbox
3853 * commands to instruct the MPI what to do per the ethtool
3854 * settings.
3855 */
3856
3857 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3858 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003859 netif_err(qdev, ifdown, qdev->ndev,
3860 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3861 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003862 return -EINVAL;
3863 }
3864
3865 if (qdev->wol & WAKE_MAGIC) {
3866 status = ql_mb_wol_set_magic(qdev, 1);
3867 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003868 netif_err(qdev, ifdown, qdev->ndev,
3869 "Failed to set magic packet on %s.\n",
3870 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003871 return status;
3872 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003873 netif_info(qdev, drv, qdev->ndev,
3874 "Enabled magic packet successfully on %s.\n",
3875 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003876
3877 wol |= MB_WOL_MAGIC_PKT;
3878 }
3879
3880 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003881 wol |= MB_WOL_MODE_ON;
3882 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003883 netif_err(qdev, drv, qdev->ndev,
3884 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003885 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003886 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003887 }
3888
3889 return status;
3890}
3891
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003892static int ql_adapter_down(struct ql_adapter *qdev)
3893{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003894 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003895
Ron Mercer6a473302009-07-02 06:06:12 +00003896 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003897
Ron Mercer6497b602009-02-12 16:37:13 -08003898 /* Don't kill the reset worker thread if we
3899 * are in the process of recovery.
3900 */
3901 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3902 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003903 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3904 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003905 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003906 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003907 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003908
Ron Mercer39aa8162009-08-27 11:02:11 +00003909 for (i = 0; i < qdev->rss_ring_count; i++)
3910 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003911
3912 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3913
3914 ql_disable_interrupts(qdev);
3915
3916 ql_tx_ring_clean(qdev);
3917
Ron Mercer6b318cb2009-03-09 10:59:26 +00003918	/* Call netif_napi_del() from a common point.
3919 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003920 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003921 netif_napi_del(&qdev->rx_ring[i].napi);
3922
Ron Mercer4545a3f2009-02-23 10:42:17 +00003923 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003924
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003925 status = ql_adapter_reset(qdev);
3926 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003927 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3928 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003929 return status;
3930}
3931
3932static int ql_adapter_up(struct ql_adapter *qdev)
3933{
3934 int err = 0;
3935
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003936 err = ql_adapter_initialize(qdev);
3937 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003938 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003939 goto err_init;
3940 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003941 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003942 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003943 /* If the port is initialized and the
3944	 * link is up then turn on the carrier.
3945 */
3946 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3947 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003948 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003949 /* Restore rx mode. */
3950 clear_bit(QL_ALLMULTI, &qdev->flags);
3951 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3952 qlge_set_multicast_list(qdev->ndev);
3953
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954 ql_enable_interrupts(qdev);
3955 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003956 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003957
3958 return 0;
3959err_init:
3960 ql_adapter_reset(qdev);
3961 return err;
3962}
3963
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003964static void ql_release_adapter_resources(struct ql_adapter *qdev)
3965{
3966 ql_free_mem_resources(qdev);
3967 ql_free_irq(qdev);
3968}
3969
3970static int ql_get_adapter_resources(struct ql_adapter *qdev)
3971{
3972 int status = 0;
3973
3974 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003975 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003976 return -ENOMEM;
3977 }
3978 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003979 return status;
3980}
3981
3982static int qlge_close(struct net_device *ndev)
3983{
3984 struct ql_adapter *qdev = netdev_priv(ndev);
3985
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003986	/* If we hit the pci_channel_io_perm_failure
 3987	 * condition, then we have already
 3988	 * brought the adapter down.
3989 */
3990 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003991 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003992 clear_bit(QL_EEH_FATAL, &qdev->flags);
3993 return 0;
3994 }
3995
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003996 /*
3997 * Wait for device to recover from a reset.
3998 * (Rarely happens, but possible.)
3999 */
4000 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4001 msleep(1);
4002 ql_adapter_down(qdev);
4003 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004004 return 0;
4005}
4006
4007static int ql_configure_rings(struct ql_adapter *qdev)
4008{
4009 int i;
4010 struct rx_ring *rx_ring;
4011 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004012 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004013 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4014 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4015
4016 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004017
Ron Mercera4ab6132009-08-27 11:02:10 +00004018 /* In a perfect world we have one RSS ring for each CPU
4019	 * and each has its own vector. To do that we ask for
4020 * cpu_cnt vectors. ql_enable_msix() will adjust the
4021 * vector count to what we actually get. We then
4022 * allocate an RSS ring for each.
4023 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004024 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004025 qdev->intr_count = cpu_cnt;
4026 ql_enable_msix(qdev);
4027 /* Adjust the RSS ring count to the actual vector count. */
4028 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004029 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004030 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
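	/* The rx_ring[] array holds the rss_ring_count inbound (RSS)
	 * rings first, followed by one outbound completion ring for
	 * each TX ring; hence the total above.
	 */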
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004031
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004032 for (i = 0; i < qdev->tx_ring_count; i++) {
4033 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004034 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004035 tx_ring->qdev = qdev;
4036 tx_ring->wq_id = i;
4037 tx_ring->wq_len = qdev->tx_ring_size;
4038 tx_ring->wq_size =
4039 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4040
4041 /*
4042		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004043 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004044 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004045 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004046 }
4047
4048 for (i = 0; i < qdev->rx_ring_count; i++) {
4049 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004050 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004051 rx_ring->qdev = qdev;
4052 rx_ring->cq_id = i;
4053 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004054 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004055 /*
4056 * Inbound (RSS) queues.
4057 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004058 rx_ring->cq_len = qdev->rx_ring_size;
4059 rx_ring->cq_size =
4060 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4061 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4062 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004063 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004064 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004065 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4066 "lbq_buf_size %d, order = %d\n",
4067 rx_ring->lbq_buf_size,
4068 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004069 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4070 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004071 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004072 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004073 rx_ring->type = RX_Q;
4074 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004075 /*
4076 * Outbound queue handles outbound completions only.
4077 */
4078 /* outbound cq is same size as tx_ring it services. */
4079 rx_ring->cq_len = qdev->tx_ring_size;
4080 rx_ring->cq_size =
4081 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4082 rx_ring->lbq_len = 0;
4083 rx_ring->lbq_size = 0;
4084 rx_ring->lbq_buf_size = 0;
4085 rx_ring->sbq_len = 0;
4086 rx_ring->sbq_size = 0;
4087 rx_ring->sbq_buf_size = 0;
4088 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004089 }
4090 }
4091 return 0;
4092}
4093
4094static int qlge_open(struct net_device *ndev)
4095{
4096 int err = 0;
4097 struct ql_adapter *qdev = netdev_priv(ndev);
4098
Ron Mercer74e12432009-11-11 12:54:04 +00004099 err = ql_adapter_reset(qdev);
4100 if (err)
4101 return err;
4102
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004103 err = ql_configure_rings(qdev);
4104 if (err)
4105 return err;
4106
4107 err = ql_get_adapter_resources(qdev);
4108 if (err)
4109 goto error_up;
4110
4111 err = ql_adapter_up(qdev);
4112 if (err)
4113 goto error_up;
4114
4115 return err;
4116
4117error_up:
4118 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004119 return err;
4120}
4121
Ron Mercer7c734352009-10-19 03:32:19 +00004122static int ql_change_rx_buffers(struct ql_adapter *qdev)
4123{
4124 struct rx_ring *rx_ring;
4125 int i, status;
4126 u32 lbq_buf_len;
4127
4128 /* Wait for an oustanding reset to complete. */
4129 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4130 int i = 3;
4131 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004132 netif_err(qdev, ifup, qdev->ndev,
4133 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004134 ssleep(1);
4135 }
4136
4137 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004138 netif_err(qdev, ifup, qdev->ndev,
4139 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004140 return -ETIMEDOUT;
4141 }
4142 }
4143
4144 status = ql_adapter_down(qdev);
4145 if (status)
4146 goto error;
4147
4148 /* Get the new rx buffer size. */
4149 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4150 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4151 qdev->lbq_buf_order = get_order(lbq_buf_len);
4152
4153 for (i = 0; i < qdev->rss_ring_count; i++) {
4154 rx_ring = &qdev->rx_ring[i];
4155 /* Set the new size. */
4156 rx_ring->lbq_buf_size = lbq_buf_len;
4157 }
4158
4159 status = ql_adapter_up(qdev);
4160 if (status)
4161 goto error;
4162
4163 return status;
4164error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004165 netif_alert(qdev, ifup, qdev->ndev,
4166 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004167 set_bit(QL_ADAPTER_UP, &qdev->flags);
4168 dev_close(qdev->ndev);
4169 return status;
4170}
4171
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004172static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4173{
4174 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004175 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004176
4177 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004178 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004179 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004180 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004181 } else
4182 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004183
4184 queue_delayed_work(qdev->workqueue,
4185 &qdev->mpi_port_cfg_work, 3*HZ);
4186
Breno Leitao746079d2010-02-04 10:11:19 +00004187 ndev->mtu = new_mtu;
4188
Ron Mercer7c734352009-10-19 03:32:19 +00004189 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004190 return 0;
4191 }
4192
Ron Mercer7c734352009-10-19 03:32:19 +00004193 status = ql_change_rx_buffers(qdev);
4194 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004195 netif_err(qdev, ifup, qdev->ndev,
4196 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004197 }
4198
4199 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004200}
4201
4202static struct net_device_stats *qlge_get_stats(struct net_device
4203 *ndev)
4204{
Ron Mercer885ee392009-11-03 13:49:31 +00004205 struct ql_adapter *qdev = netdev_priv(ndev);
4206 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4207 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4208 unsigned long pkts, mcast, dropped, errors, bytes;
4209 int i;
4210
4211 /* Get RX stats. */
4212 pkts = mcast = dropped = errors = bytes = 0;
4213 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4214 pkts += rx_ring->rx_packets;
4215 bytes += rx_ring->rx_bytes;
4216 dropped += rx_ring->rx_dropped;
4217 errors += rx_ring->rx_errors;
4218 mcast += rx_ring->rx_multicast;
4219 }
4220 ndev->stats.rx_packets = pkts;
4221 ndev->stats.rx_bytes = bytes;
4222 ndev->stats.rx_dropped = dropped;
4223 ndev->stats.rx_errors = errors;
4224 ndev->stats.multicast = mcast;
4225
4226 /* Get TX stats. */
4227 pkts = errors = bytes = 0;
4228 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4229 pkts += tx_ring->tx_packets;
4230 bytes += tx_ring->tx_bytes;
4231 errors += tx_ring->tx_errors;
4232 }
4233 ndev->stats.tx_packets = pkts;
4234 ndev->stats.tx_bytes = bytes;
4235 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004236 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004237}
4238
Ron Mercerf2c05002010-07-05 12:19:37 +00004239void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004240{
4241 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004242 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004243 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004244
Ron Mercercc288f52009-02-23 10:42:14 +00004245 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4246 if (status)
4247 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004248 /*
4249 * Set or clear promiscuous mode if a
4250 * transition is taking place.
4251 */
4252 if (ndev->flags & IFF_PROMISC) {
4253 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4254 if (ql_set_routing_reg
4255 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004256 netif_err(qdev, hw, qdev->ndev,
4257 "Failed to set promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004258 } else {
4259 set_bit(QL_PROMISCUOUS, &qdev->flags);
4260 }
4261 }
4262 } else {
4263 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4264 if (ql_set_routing_reg
4265 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004266 netif_err(qdev, hw, qdev->ndev,
4267 "Failed to clear promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004268 } else {
4269 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4270 }
4271 }
4272 }
4273
4274 /*
4275 * Set or clear all multicast mode if a
4276 * transition is taking place.
4277 */
4278 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004279 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004280 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4281 if (ql_set_routing_reg
4282 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004283 netif_err(qdev, hw, qdev->ndev,
4284 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004285 } else {
4286 set_bit(QL_ALLMULTI, &qdev->flags);
4287 }
4288 }
4289 } else {
4290 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4291 if (ql_set_routing_reg
4292 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004293 netif_err(qdev, hw, qdev->ndev,
4294 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004295 } else {
4296 clear_bit(QL_ALLMULTI, &qdev->flags);
4297 }
4298 }
4299 }
4300
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004301 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004302 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4303 if (status)
4304 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004305 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004306 netdev_for_each_mc_addr(ha, ndev) {
4307 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004308 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004309 netif_err(qdev, hw, qdev->ndev,
4310					  "Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004311 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004312 goto exit;
4313 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004314 i++;
4315 }
Ron Mercercc288f52009-02-23 10:42:14 +00004316 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004317 if (ql_set_routing_reg
4318 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004319 netif_err(qdev, hw, qdev->ndev,
4320 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004321 } else {
4322 set_bit(QL_ALLMULTI, &qdev->flags);
4323 }
4324 }
4325exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004326 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004327}
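/* Roughly, the receive-filter policy implemented above: the promiscuous
 * and all-multi routing slots are touched only when the corresponding
 * IFF_ flag actually changes (tracked with the QL_PROMISCUOUS and
 * QL_ALLMULTI bits), and an explicit multicast list is written into the
 * multicast MAC address registers one entry at a time before the
 * multicast-match routing slot is enabled.
 */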
4328
4329static int qlge_set_mac_address(struct net_device *ndev, void *p)
4330{
4331 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4332 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004333 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004334
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004335 if (!is_valid_ether_addr(addr->sa_data))
4336 return -EADDRNOTAVAIL;
4337 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004338 /* Update local copy of current mac address. */
4339 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004340
Ron Mercercc288f52009-02-23 10:42:14 +00004341 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4342 if (status)
4343 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004344 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4345 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004346 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004347 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004348 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4349 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004350}
4351
4352static void qlge_tx_timeout(struct net_device *ndev)
4353{
4354 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004355 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004356}
4357
4358static void ql_asic_reset_work(struct work_struct *work)
4359{
4360 struct ql_adapter *qdev =
4361 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004362 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004363 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004364 status = ql_adapter_down(qdev);
4365 if (status)
4366 goto error;
4367
4368 status = ql_adapter_up(qdev);
4369 if (status)
4370 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004371
4372 /* Restore rx mode. */
4373 clear_bit(QL_ALLMULTI, &qdev->flags);
4374 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4375 qlge_set_multicast_list(qdev->ndev);
4376
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004377 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004378 return;
4379error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004380 netif_alert(qdev, ifup, qdev->ndev,
4381 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004382
Ron Mercerdb988122009-03-09 10:59:17 +00004383 set_bit(QL_ADAPTER_UP, &qdev->flags);
4384 dev_close(qdev->ndev);
4385 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004386}
4387
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004388static struct nic_operations qla8012_nic_ops = {
4389 .get_flash = ql_get_8012_flash_params,
4390 .port_initialize = ql_8012_port_initialize,
4391};
4392
Ron Mercercdca8d02009-03-02 08:07:31 +00004393static struct nic_operations qla8000_nic_ops = {
4394 .get_flash = ql_get_8000_flash_params,
4395 .port_initialize = ql_8000_port_initialize,
4396};
4397
Ron Mercere4552f52009-06-09 05:39:32 +00004398/* Find the pcie function number for the other NIC
4399 * on this chip. Since both NIC functions share a
4400 * common firmware, we have the lowest enabled function
4401 * do any common work. Examples would be resetting
4402 * after a fatal firmware error, or doing a firmware
4403 * coredump.
4404 */
4405static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004406{
Ron Mercere4552f52009-06-09 05:39:32 +00004407 int status = 0;
4408 u32 temp;
4409 u32 nic_func1, nic_func2;
4410
4411 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4412 &temp);
4413 if (status)
4414 return status;
4415
4416 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4417 MPI_TEST_NIC_FUNC_MASK);
4418 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4419 MPI_TEST_NIC_FUNC_MASK);
4420
4421 if (qdev->func == nic_func1)
4422 qdev->alt_func = nic_func2;
4423 else if (qdev->func == nic_func2)
4424 qdev->alt_func = nic_func1;
4425 else
4426 status = -EIO;
4427
4428 return status;
4429}
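/* Illustrative example (values assumed, not taken from hardware docs): if
 * MPI_TEST_FUNC_PORT_CFG reports nic_func1 = 0 and nic_func2 = 1, the
 * instance running on function 0 records alt_func = 1 and, being the
 * lower-numbered function, becomes port 0 in ql_get_board_info() below,
 * while the instance on function 1 records alt_func = 0 and becomes
 * port 1.
 */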
4430
4431static int ql_get_board_info(struct ql_adapter *qdev)
4432{
4433 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004434 qdev->func =
4435 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004436 if (qdev->func > 3)
4437 return -EIO;
4438
4439 status = ql_get_alt_pcie_func(qdev);
4440 if (status)
4441 return status;
4442
4443 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4444 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004445 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4446 qdev->port_link_up = STS_PL1;
4447 qdev->port_init = STS_PI1;
4448 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4449 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4450 } else {
4451 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4452 qdev->port_link_up = STS_PL0;
4453 qdev->port_init = STS_PI0;
4454 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4455 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4456 }
4457 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004458 qdev->device_id = qdev->pdev->device;
4459 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4460 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004461 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4462 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004463 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004464}
4465
4466static void ql_release_all(struct pci_dev *pdev)
4467{
4468 struct net_device *ndev = pci_get_drvdata(pdev);
4469 struct ql_adapter *qdev = netdev_priv(ndev);
4470
4471 if (qdev->workqueue) {
4472 destroy_workqueue(qdev->workqueue);
4473 qdev->workqueue = NULL;
4474 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004475
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004476 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004477 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004478 if (qdev->doorbell_area)
4479 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004480 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004481 pci_release_regions(pdev);
4482 pci_set_drvdata(pdev, NULL);
4483}
4484
4485static int __devinit ql_init_device(struct pci_dev *pdev,
4486 struct net_device *ndev, int cards_found)
4487{
4488 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004489 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004490
Ron Mercere3324712009-07-02 06:06:13 +00004491 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004492 err = pci_enable_device(pdev);
4493 if (err) {
4494 dev_err(&pdev->dev, "PCI device enable failed.\n");
4495 return err;
4496 }
4497
Ron Mercerebd6e772009-09-29 08:39:25 +00004498 qdev->ndev = ndev;
4499 qdev->pdev = pdev;
4500 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004501
Ron Mercerbc9167f2009-10-10 09:35:04 +00004502 /* Set PCIe read request size */
4503 err = pcie_set_readrq(pdev, 4096);
4504 if (err) {
4505 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004506 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004507 }
4508
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004509 err = pci_request_regions(pdev, DRV_NAME);
4510 if (err) {
4511 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004512		goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004513 }
4514
4515 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004516 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004517 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004518 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004519 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004520 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004521 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004522 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004523 }
4524
4525 if (err) {
4526 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004527 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004528 }
4529
Ron Mercer73475332009-11-06 07:44:58 +00004530 /* Set PCIe reset type for EEH to fundamental. */
4531 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004532 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004533 qdev->reg_base =
4534 ioremap_nocache(pci_resource_start(pdev, 1),
4535 pci_resource_len(pdev, 1));
4536 if (!qdev->reg_base) {
4537 dev_err(&pdev->dev, "Register mapping failed.\n");
4538 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004539 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004540 }
4541
4542 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4543 qdev->doorbell_area =
4544 ioremap_nocache(pci_resource_start(pdev, 3),
4545 pci_resource_len(pdev, 3));
4546 if (!qdev->doorbell_area) {
4547 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4548 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004549 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004550 }
4551
Ron Mercere4552f52009-06-09 05:39:32 +00004552 err = ql_get_board_info(qdev);
4553 if (err) {
4554 dev_err(&pdev->dev, "Register access failed.\n");
4555 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004556 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004557 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004558 qdev->msg_enable = netif_msg_init(debug, default_msg);
4559 spin_lock_init(&qdev->hw_lock);
4560 spin_lock_init(&qdev->stats_lock);
4561
Ron Mercer8aae2602010-01-15 13:31:28 +00004562 if (qlge_mpi_coredump) {
4563 qdev->mpi_coredump =
4564 vmalloc(sizeof(struct ql_mpi_coredump));
4565 if (qdev->mpi_coredump == NULL) {
4566 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4567 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004568 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004569 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004570 if (qlge_force_coredump)
4571 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004572 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004573 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004574 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004575 if (err) {
4576 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004577 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004578 }
4579
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004580 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004581 /* Keep local copy of current mac address. */
4582 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004583
4584 /* Set up the default ring sizes. */
4585 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4586 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4587
4588 /* Set up the coalescing parameters. */
4589 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4590 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4591 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4592 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4593
4594 /*
4595 * Set up the operating parameters.
4596 */
4597 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004598 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4599 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4600 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4601 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004602 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004603 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004604 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004605 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004606
4607 if (!cards_found) {
4608 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4609 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4610 DRV_NAME, DRV_VERSION);
4611 }
4612 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004613err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004614 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004615err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004616 pci_disable_device(pdev);
4617 return err;
4618}
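/* Error-path convention used above: err_out2 unwinds everything that
 * ql_release_all() knows about (workqueue, mappings, regions, coredump
 * buffer) and then falls through to err_out1, which only disables the
 * PCI device before returning the error.
 */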
4619
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004620static const struct net_device_ops qlge_netdev_ops = {
4621 .ndo_open = qlge_open,
4622 .ndo_stop = qlge_close,
4623 .ndo_start_xmit = qlge_send,
4624 .ndo_change_mtu = qlge_change_mtu,
4625 .ndo_get_stats = qlge_get_stats,
4626 .ndo_set_multicast_list = qlge_set_multicast_list,
4627 .ndo_set_mac_address = qlge_set_mac_address,
4628 .ndo_validate_addr = eth_validate_addr,
4629 .ndo_tx_timeout = qlge_tx_timeout,
Ron Mercer01e6b952009-10-30 12:13:34 +00004630 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4631 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4632 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004633};
4634
Ron Mercer15c052f2010-02-04 13:32:46 -08004635static void ql_timer(unsigned long data)
4636{
4637 struct ql_adapter *qdev = (struct ql_adapter *)data;
4638 u32 var = 0;
4639
4640 var = ql_read32(qdev, STS);
4641 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004642 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004643 return;
4644 }
4645
Breno Leitao72046d82010-07-01 03:00:17 +00004646 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004647}
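/* The periodic STS read in ql_timer() is what keeps generating PCI bus
 * traffic while the interface may otherwise be idle; if EEH has frozen
 * the slot, pci_channel_offline() returns true, the error above is
 * logged and the timer is simply not rearmed. The 5*HZ period looks
 * like a heartbeat choice rather than a hardware requirement.
 */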
4648
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004649static int __devinit qlge_probe(struct pci_dev *pdev,
4650 const struct pci_device_id *pci_entry)
4651{
4652 struct net_device *ndev = NULL;
4653 struct ql_adapter *qdev = NULL;
4654 static int cards_found = 0;
4655 int err = 0;
4656
Ron Mercer1e213302009-03-09 10:59:21 +00004657 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4658 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004659 if (!ndev)
4660 return -ENOMEM;
4661
4662 err = ql_init_device(pdev, ndev, cards_found);
4663 if (err < 0) {
4664 free_netdev(ndev);
4665 return err;
4666 }
4667
4668 qdev = netdev_priv(ndev);
4669 SET_NETDEV_DEV(ndev, &pdev->dev);
4670 ndev->features = (0
4671 | NETIF_F_IP_CSUM
4672 | NETIF_F_SG
4673 | NETIF_F_TSO
4674 | NETIF_F_TSO6
4675 | NETIF_F_TSO_ECN
4676 | NETIF_F_HW_VLAN_TX
4677 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004678 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004679
4680 if (test_bit(QL_DMA64, &qdev->flags))
4681 ndev->features |= NETIF_F_HIGHDMA;
4682
4683 /*
4684 * Set up net_device structure.
4685 */
4686 ndev->tx_queue_len = qdev->tx_ring_size;
4687 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004688
4689 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004690 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004691 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004692
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004693 err = register_netdev(ndev);
4694 if (err) {
4695 dev_err(&pdev->dev, "net device registration failed.\n");
4696 ql_release_all(pdev);
4697 pci_disable_device(pdev);
4698 return err;
4699 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004700 /* Start up the timer to trigger EEH if
4701 * the bus goes dead
4702 */
4703 init_timer_deferrable(&qdev->timer);
4704 qdev->timer.data = (unsigned long)qdev;
4705 qdev->timer.function = ql_timer;
4706 qdev->timer.expires = jiffies + (5*HZ);
4707 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004708 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004709 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004710 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004711 cards_found++;
4712 return 0;
4713}
4714
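/* ql_lb_send() and ql_clean_lb_rx_ring() below are thin wrappers around
 * the normal transmit and rx-completion paths; they are presumably
 * exported for the driver's loopback self-test (ethtool) code.
 */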
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004715netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4716{
4717 return qlge_send(skb, ndev);
4718}
4719
4720int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4721{
4722 return ql_clean_inbound_rx_ring(rx_ring, budget);
4723}
4724
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004725static void __devexit qlge_remove(struct pci_dev *pdev)
4726{
4727 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004728 struct ql_adapter *qdev = netdev_priv(ndev);
4729 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004730 unregister_netdev(ndev);
4731 ql_release_all(pdev);
4732 pci_disable_device(pdev);
4733 free_netdev(ndev);
4734}
4735
Ron Mercer6d190c62009-10-28 08:39:20 +00004736/* Clean up resources without touching hardware. */
4737static void ql_eeh_close(struct net_device *ndev)
4738{
4739 int i;
4740 struct ql_adapter *qdev = netdev_priv(ndev);
4741
4742 if (netif_carrier_ok(ndev)) {
4743 netif_carrier_off(ndev);
4744 netif_stop_queue(ndev);
4745 }
4746
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004747 /* Disabling the timer */
4748 del_timer_sync(&qdev->timer);
Ron Mercer6d190c62009-10-28 08:39:20 +00004749 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4750 cancel_delayed_work_sync(&qdev->asic_reset_work);
4751 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4752 cancel_delayed_work_sync(&qdev->mpi_work);
4753 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004754 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercer6d190c62009-10-28 08:39:20 +00004755 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4756
4757 for (i = 0; i < qdev->rss_ring_count; i++)
4758 netif_napi_del(&qdev->rx_ring[i].napi);
4759
4760 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4761 ql_tx_ring_clean(qdev);
4762 ql_free_rx_buffers(qdev);
4763 ql_release_adapter_resources(qdev);
4764}
4765
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004766/*
4767 * This callback is called by the PCI subsystem whenever
4768 * a PCI bus error is detected.
4769 */
4770static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4771 enum pci_channel_state state)
4772{
4773 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004774 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004775
Ron Mercer6d190c62009-10-28 08:39:20 +00004776 switch (state) {
4777 case pci_channel_io_normal:
4778 return PCI_ERS_RESULT_CAN_RECOVER;
4779 case pci_channel_io_frozen:
4780 netif_device_detach(ndev);
4781 if (netif_running(ndev))
4782 ql_eeh_close(ndev);
4783 pci_disable_device(pdev);
4784 return PCI_ERS_RESULT_NEED_RESET;
4785 case pci_channel_io_perm_failure:
4786 dev_err(&pdev->dev,
4787 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004788 ql_eeh_close(ndev);
4789 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004790 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004791 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004792
4793 /* Request a slot reset. */
4794 return PCI_ERS_RESULT_NEED_RESET;
4795}
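/* Standard EEH recovery sequence, for reference: when error_detected()
 * above returns PCI_ERS_RESULT_NEED_RESET, the PCI core resets the slot
 * and then calls qlge_io_slot_reset() followed by qlge_io_resume() below;
 * returning PCI_ERS_RESULT_DISCONNECT instead takes the device offline
 * for good.
 */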
4796
4797/*
4798 * This callback is called after the PCI bus has been reset.
4799 * Basically, this tries to restart the card from scratch.
4800 * This is a shortened version of the device probe/discovery code;
4801 * it resembles the first half of the () routine.
4802 */
4803static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4804{
4805 struct net_device *ndev = pci_get_drvdata(pdev);
4806 struct ql_adapter *qdev = netdev_priv(ndev);
4807
Ron Mercer6d190c62009-10-28 08:39:20 +00004808 pdev->error_state = pci_channel_io_normal;
4809
4810 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004811 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004812 netif_err(qdev, ifup, qdev->ndev,
4813 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004814 return PCI_ERS_RESULT_DISCONNECT;
4815 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004816 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004817
4818 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004819 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004820 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004821 return PCI_ERS_RESULT_DISCONNECT;
4822 }
4823
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004824 return PCI_ERS_RESULT_RECOVERED;
4825}
4826
4827static void qlge_io_resume(struct pci_dev *pdev)
4828{
4829 struct net_device *ndev = pci_get_drvdata(pdev);
4830 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004831 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004832
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004833 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004834 err = qlge_open(ndev);
4835 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004836 netif_err(qdev, ifup, qdev->ndev,
4837 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004838 return;
4839 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004840 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004841 netif_err(qdev, ifup, qdev->ndev,
4842 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004843 }
Breno Leitao72046d82010-07-01 03:00:17 +00004844 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004845 netif_device_attach(ndev);
4846}
4847
4848static struct pci_error_handlers qlge_err_handler = {
4849 .error_detected = qlge_io_error_detected,
4850 .slot_reset = qlge_io_slot_reset,
4851 .resume = qlge_io_resume,
4852};
4853
4854static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4855{
4856 struct net_device *ndev = pci_get_drvdata(pdev);
4857 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004858 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004859
4860 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004861 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004862
4863 if (netif_running(ndev)) {
4864 err = ql_adapter_down(qdev);
4865		if (err)
4866			return err;
4867 }
4868
Ron Mercerbc083ce2009-10-21 11:07:40 +00004869 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004870 err = pci_save_state(pdev);
4871 if (err)
4872 return err;
4873
4874 pci_disable_device(pdev);
4875
4876 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4877
4878 return 0;
4879}
4880
David S. Miller04da2cf2008-09-19 16:14:24 -07004881#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004882static int qlge_resume(struct pci_dev *pdev)
4883{
4884 struct net_device *ndev = pci_get_drvdata(pdev);
4885 struct ql_adapter *qdev = netdev_priv(ndev);
4886 int err;
4887
4888 pci_set_power_state(pdev, PCI_D0);
4889 pci_restore_state(pdev);
4890 err = pci_enable_device(pdev);
4891 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004892 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004893 return err;
4894 }
4895 pci_set_master(pdev);
4896
4897 pci_enable_wake(pdev, PCI_D3hot, 0);
4898 pci_enable_wake(pdev, PCI_D3cold, 0);
4899
4900 if (netif_running(ndev)) {
4901 err = ql_adapter_up(qdev);
4902 if (err)
4903 return err;
4904 }
4905
Breno Leitao72046d82010-07-01 03:00:17 +00004906 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004907 netif_device_attach(ndev);
4908
4909 return 0;
4910}
David S. Miller04da2cf2008-09-19 16:14:24 -07004911#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004912
4913static void qlge_shutdown(struct pci_dev *pdev)
4914{
4915 qlge_suspend(pdev, PMSG_SUSPEND);
4916}
4917
4918static struct pci_driver qlge_driver = {
4919 .name = DRV_NAME,
4920 .id_table = qlge_pci_tbl,
4921 .probe = qlge_probe,
4922 .remove = __devexit_p(qlge_remove),
4923#ifdef CONFIG_PM
4924 .suspend = qlge_suspend,
4925 .resume = qlge_resume,
4926#endif
4927 .shutdown = qlge_shutdown,
4928 .err_handler = &qlge_err_handler
4929};
4930
4931static int __init qlge_init_module(void)
4932{
4933 return pci_register_driver(&qlge_driver);
4934}
4935
4936static void __exit qlge_exit(void)
4937{
4938 pci_unregister_driver(&qlge_driver);
4939}
4940
4941module_init(qlge_init_module);
4942module_exit(qlge_exit);