blob: 39360c485867dd23434ab0fe4efd352edf9cdcc2 [file] [log] [blame]
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
Jiri Pirko18c49b92011-07-21 03:24:11 +000010#include <linux/bitops.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040011#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/list.h>
14#include <linux/pci.h>
15#include <linux/dma-mapping.h>
16#include <linux/pagemap.h>
17#include <linux/sched.h>
18#include <linux/slab.h>
19#include <linux/dmapool.h>
20#include <linux/mempool.h>
21#include <linux/spinlock.h>
22#include <linux/kthread.h>
23#include <linux/interrupt.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/in.h>
27#include <linux/ip.h>
28#include <linux/ipv6.h>
29#include <net/ipv6.h>
30#include <linux/tcp.h>
31#include <linux/udp.h>
32#include <linux/if_arp.h>
33#include <linux/if_ether.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/ethtool.h>
Jiri Pirko18c49b92011-07-21 03:24:11 +000037#include <linux/if_vlan.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040038#include <linux/skbuff.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040039#include <linux/delay.h>
40#include <linux/mm.h>
41#include <linux/vmalloc.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040042#include <linux/prefetch.h>
Kamalesh Babulalb7c6bfb2008-10-13 18:41:01 -070043#include <net/ip6_checksum.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040044
45#include "qlge.h"
46
47char qlge_driver_name[] = DRV_NAME;
48const char qlge_driver_version[] = DRV_VERSION;
49
50MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51MODULE_DESCRIPTION(DRV_STRING " ");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(DRV_VERSION);
54
55static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57/* NETIF_MSG_TIMER | */
58 NETIF_MSG_IFDOWN |
59 NETIF_MSG_IFUP |
60 NETIF_MSG_RX_ERR |
61 NETIF_MSG_TX_ERR |
Ron Mercer49740972009-02-26 10:08:36 +000062/* NETIF_MSG_TX_QUEUED | */
63/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
Ron Mercerc4e84bd2008-09-18 11:56:28 -040064/* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
Sonny Rao84cf7022010-11-18 11:50:02 +000067static int debug = -1; /* defaults above */
68module_param(debug, int, 0664);
Ron Mercerc4e84bd2008-09-18 11:56:28 -040069MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71#define MSIX_IRQ 0
72#define MSI_IRQ 1
73#define LEG_IRQ 2
Ron Mercera5a62a12009-11-11 12:54:05 +000074static int qlge_irq_type = MSIX_IRQ;
Sonny Rao84cf7022010-11-18 11:50:02 +000075module_param(qlge_irq_type, int, 0664);
Ron Mercera5a62a12009-11-11 12:54:05 +000076MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
Ron Mercerc4e84bd2008-09-18 11:56:28 -040077
Ron Mercer8aae2602010-01-15 13:31:28 +000078static int qlge_mpi_coredump;
79module_param(qlge_mpi_coredump, int, 0);
80MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
Ron Mercerd5c1da52010-01-15 13:31:34 +000082 "Default is OFF - Do Not allocate memory. ");
83
84static int qlge_force_coredump;
85module_param(qlge_force_coredump, int, 0);
86MODULE_PARM_DESC(qlge_force_coredump,
87 "Option to allow force of firmware core dump. "
88 "Default is OFF - Do not allow.");
Ron Mercer8aae2602010-01-15 13:31:28 +000089
Alexey Dobriyana3aa1882010-01-07 11:58:11 +000090static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +000091 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
Ron Mercercdca8d02009-03-02 08:07:31 +000092 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
Ron Mercerc4e84bd2008-09-18 11:56:28 -040093 /* required last entry */
94 {0,}
95};
96
97MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
stephen hemmingerac409212010-10-21 07:50:54 +000099static int ql_wol(struct ql_adapter *qdev);
100static void qlge_set_multicast_list(struct net_device *ndev);
101
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400102/* This hardware semaphore causes exclusive access to
103 * resources shared between the NIC driver, MPI firmware,
104 * FCOE firmware and the FC driver.
105 */
106static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107{
108 u32 sem_bits = 0;
109
110 switch (sem_mask) {
111 case SEM_XGMAC0_MASK:
112 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 break;
114 case SEM_XGMAC1_MASK:
115 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 break;
117 case SEM_ICB_MASK:
118 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 break;
120 case SEM_MAC_ADDR_MASK:
121 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 break;
123 case SEM_FLASH_MASK:
124 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 break;
126 case SEM_PROBE_MASK:
127 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 break;
129 case SEM_RT_IDX_MASK:
130 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 break;
132 case SEM_PROC_REG_MASK:
133 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 break;
135 default:
Joe Perchesae9540f72010-02-09 11:49:52 +0000136 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400137 return -EINVAL;
138 }
139
140 ql_write32(qdev, SEM, sem_bits | sem_mask);
141 return !(ql_read32(qdev, SEM) & sem_bits);
142}
143
144int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145{
Ron Mercer0857e9d2009-01-09 11:31:52 +0000146 unsigned int wait_count = 30;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400147 do {
148 if (!ql_sem_trylock(qdev, sem_mask))
149 return 0;
Ron Mercer0857e9d2009-01-09 11:31:52 +0000150 udelay(100);
151 } while (--wait_count);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400152 return -ETIMEDOUT;
153}
154
155void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156{
157 ql_write32(qdev, SEM, sem_mask);
158 ql_read32(qdev, SEM); /* flush */
159}
160
161/* This function waits for a specific bit to come ready
162 * in a given register. It is used mostly by the initialize
163 * process, but is also used in kernel thread API such as
164 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165 */
166int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167{
168 u32 temp;
169 int count = UDELAY_COUNT;
170
171 while (count) {
172 temp = ql_read32(qdev, reg);
173
174 /* check for errors */
175 if (temp & err_bit) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000176 netif_alert(qdev, probe, qdev->ndev,
177 "register 0x%.08x access error, value = 0x%.08x!.\n",
178 reg, temp);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400179 return -EIO;
180 } else if (temp & bit)
181 return 0;
182 udelay(UDELAY_DELAY);
183 count--;
184 }
Joe Perchesae9540f72010-02-09 11:49:52 +0000185 netif_alert(qdev, probe, qdev->ndev,
186 "Timed out waiting for reg %x to come ready.\n", reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400187 return -ETIMEDOUT;
188}
189
190/* The CFG register is used to download TX and RX control blocks
191 * to the chip. This function waits for an operation to complete.
192 */
193static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194{
195 int count = UDELAY_COUNT;
196 u32 temp;
197
198 while (count) {
199 temp = ql_read32(qdev, CFG);
200 if (temp & CFG_LE)
201 return -EIO;
202 if (!(temp & bit))
203 return 0;
204 udelay(UDELAY_DELAY);
205 count--;
206 }
207 return -ETIMEDOUT;
208}
209
210
211/* Used to issue init control blocks to hw. Maps control block,
212 * sets address, triggers download, waits for completion.
213 */
214int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 u16 q_id)
216{
217 u64 map;
218 int status = 0;
219 int direction;
220 u32 mask;
221 u32 value;
222
223 direction =
224 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 PCI_DMA_FROMDEVICE;
226
227 map = pci_map_single(qdev->pdev, ptr, size, direction);
228 if (pci_dma_mapping_error(qdev->pdev, map)) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000229 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400230 return -ENOMEM;
231 }
232
Ron Mercer4322c5b2009-07-02 06:06:06 +0000233 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 if (status)
235 return status;
236
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400237 status = ql_wait_cfg(qdev, bit);
238 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000239 netif_err(qdev, ifup, qdev->ndev,
240 "Timed out waiting for CFG to come ready.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400241 goto exit;
242 }
243
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400244 ql_write32(qdev, ICB_L, (u32) map);
245 ql_write32(qdev, ICB_H, (u32) (map >> 32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400246
247 mask = CFG_Q_MASK | (bit << 16);
248 value = bit | (q_id << CFG_Q_SHIFT);
249 ql_write32(qdev, CFG, (mask | value));
250
251 /*
252 * Wait for the bit to clear after signaling hw.
253 */
254 status = ql_wait_cfg(qdev, bit);
255exit:
Ron Mercer4322c5b2009-07-02 06:06:06 +0000256 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400257 pci_unmap_single(qdev->pdev, map, size, direction);
258 return status;
259}
260
261/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 u32 *value)
264{
265 u32 offset = 0;
266 int status;
267
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400268 switch (type) {
269 case MAC_ADDR_TYPE_MULTI_MAC:
270 case MAC_ADDR_TYPE_CAM_MAC:
271 {
272 status =
273 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800274 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400275 if (status)
276 goto exit;
277 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 status =
281 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800282 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400283 if (status)
284 goto exit;
285 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 status =
287 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800288 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400289 if (status)
290 goto exit;
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 status =
295 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800296 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400297 if (status)
298 goto exit;
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 status =
302 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800303 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400304 if (status)
305 goto exit;
306 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 status =
310 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
Ron Mercer939678f2009-01-04 17:08:29 -0800311 MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400312 if (status)
313 goto exit;
314 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 }
316 break;
317 }
318 case MAC_ADDR_TYPE_VLAN:
319 case MAC_ADDR_TYPE_MULTI_FLTR:
320 default:
Joe Perchesae9540f72010-02-09 11:49:52 +0000321 netif_crit(qdev, ifup, qdev->ndev,
322 "Address type %d not yet supported.\n", type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400323 status = -EPERM;
324 }
325exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400326 return status;
327}
328
329/* Set up a MAC, multicast or VLAN address for the
330 * inbound frame matching.
331 */
332static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 u16 index)
334{
335 u32 offset = 0;
336 int status = 0;
337
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400338 switch (type) {
339 case MAC_ADDR_TYPE_MULTI_MAC:
Ron Mercer76b26692009-10-08 09:54:40 +0000340 {
341 u32 upper = (addr[0] << 8) | addr[1];
342 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 (addr[4] << 8) | (addr[5]);
344
345 status =
346 ql_wait_reg_rdy(qdev,
347 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 if (status)
349 goto exit;
350 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 (index << MAC_ADDR_IDX_SHIFT) |
352 type | MAC_ADDR_E);
353 ql_write32(qdev, MAC_ADDR_DATA, lower);
354 status =
355 ql_wait_reg_rdy(qdev,
356 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 if (status)
358 goto exit;
359 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 (index << MAC_ADDR_IDX_SHIFT) |
361 type | MAC_ADDR_E);
362
363 ql_write32(qdev, MAC_ADDR_DATA, upper);
364 status =
365 ql_wait_reg_rdy(qdev,
366 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 if (status)
368 goto exit;
369 break;
370 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400371 case MAC_ADDR_TYPE_CAM_MAC:
372 {
373 u32 cam_output;
374 u32 upper = (addr[0] << 8) | addr[1];
375 u32 lower =
376 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 (addr[5]);
378
Joe Perchesae9540f72010-02-09 11:49:52 +0000379 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 "Adding %s address %pM at index %d in the CAM.\n",
381 type == MAC_ADDR_TYPE_MULTI_MAC ?
382 "MULTICAST" : "UNICAST",
383 addr, index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400384
385 status =
386 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800387 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400388 if (status)
389 goto exit;
390 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 (index << MAC_ADDR_IDX_SHIFT) | /* index */
392 type); /* type */
393 ql_write32(qdev, MAC_ADDR_DATA, lower);
394 status =
395 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800396 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400397 if (status)
398 goto exit;
399 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 (index << MAC_ADDR_IDX_SHIFT) | /* index */
401 type); /* type */
402 ql_write32(qdev, MAC_ADDR_DATA, upper);
403 status =
404 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800405 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400406 if (status)
407 goto exit;
408 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
409 (index << MAC_ADDR_IDX_SHIFT) | /* index */
410 type); /* type */
411 /* This field should also include the queue id
412 and possibly the function id. Right now we hardcode
413 the route field to NIC core.
414 */
Ron Mercer76b26692009-10-08 09:54:40 +0000415 cam_output = (CAM_OUT_ROUTE_NIC |
416 (qdev->
417 func << CAM_OUT_FUNC_SHIFT) |
418 (0 << CAM_OUT_CQ_ID_SHIFT));
Jiri Pirko18c49b92011-07-21 03:24:11 +0000419 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
Ron Mercer76b26692009-10-08 09:54:40 +0000420 cam_output |= CAM_OUT_RV;
421 /* route to NIC core */
422 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400423 break;
424 }
425 case MAC_ADDR_TYPE_VLAN:
426 {
427 u32 enable_bit = *((u32 *) &addr[0]);
428 /* For VLAN, the addr actually holds a bit that
429 * either enables or disables the vlan id we are
430 * addressing. It's either MAC_ADDR_E on or off.
431 * That's bit-27 we're talking about.
432 */
Joe Perchesae9540f72010-02-09 11:49:52 +0000433 netif_info(qdev, ifup, qdev->ndev,
434 "%s VLAN ID %d %s the CAM.\n",
435 enable_bit ? "Adding" : "Removing",
436 index,
437 enable_bit ? "to" : "from");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400438
439 status =
440 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800441 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400442 if (status)
443 goto exit;
444 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
445 (index << MAC_ADDR_IDX_SHIFT) | /* index */
446 type | /* type */
447 enable_bit); /* enable/disable */
448 break;
449 }
450 case MAC_ADDR_TYPE_MULTI_FLTR:
451 default:
Joe Perchesae9540f72010-02-09 11:49:52 +0000452 netif_crit(qdev, ifup, qdev->ndev,
453 "Address type %d not yet supported.\n", type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400454 status = -EPERM;
455 }
456exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400457 return status;
458}
459
Ron Mercer7fab3bfe2009-07-02 06:06:11 +0000460/* Set or clear MAC address in hardware. We sometimes
461 * have to clear it to prevent wrong frame routing
462 * especially in a bonding environment.
463 */
464static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465{
466 int status;
467 char zero_mac_addr[ETH_ALEN];
468 char *addr;
469
470 if (set) {
Ron Mercer801e9092010-02-17 06:41:22 +0000471 addr = &qdev->current_mac_addr[0];
Joe Perchesae9540f72010-02-09 11:49:52 +0000472 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 "Set Mac addr %pM\n", addr);
Ron Mercer7fab3bfe2009-07-02 06:06:11 +0000474 } else {
475 memset(zero_mac_addr, 0, ETH_ALEN);
476 addr = &zero_mac_addr[0];
Joe Perchesae9540f72010-02-09 11:49:52 +0000477 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 "Clearing MAC address\n");
Ron Mercer7fab3bfe2009-07-02 06:06:11 +0000479 }
480 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481 if (status)
482 return status;
483 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +0000487 netif_err(qdev, ifup, qdev->ndev,
488 "Failed to init mac address.\n");
Ron Mercer7fab3bfe2009-07-02 06:06:11 +0000489 return status;
490}
491
Ron Mercer6a473302009-07-02 06:06:12 +0000492void ql_link_on(struct ql_adapter *qdev)
493{
Joe Perchesae9540f72010-02-09 11:49:52 +0000494 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
Ron Mercer6a473302009-07-02 06:06:12 +0000495 netif_carrier_on(qdev->ndev);
496 ql_set_mac_addr(qdev, 1);
497}
498
499void ql_link_off(struct ql_adapter *qdev)
500{
Joe Perchesae9540f72010-02-09 11:49:52 +0000501 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
Ron Mercer6a473302009-07-02 06:06:12 +0000502 netif_carrier_off(qdev->ndev);
503 ql_set_mac_addr(qdev, 0);
504}
505
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400506/* Get a specific frame routing value from the CAM.
507 * Used for debug and reg dump.
508 */
509int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510{
511 int status = 0;
512
Ron Mercer939678f2009-01-04 17:08:29 -0800513 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400514 if (status)
515 goto exit;
516
517 ql_write32(qdev, RT_IDX,
518 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
Ron Mercer939678f2009-01-04 17:08:29 -0800519 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400520 if (status)
521 goto exit;
522 *value = ql_read32(qdev, RT_DATA);
523exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400524 return status;
525}
526
527/* The NIC function for this chip has 16 routing indexes. Each one can be used
528 * to route different frame types to various inbound queues. We send broadcast/
529 * multicast/error frames to the default queue for slow handling,
530 * and CAM hit/RSS frames to the fast handling queues.
531 */
532static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533 int enable)
534{
Ron Mercer8587ea32009-02-23 10:42:15 +0000535 int status = -EINVAL; /* Return error if no mask match. */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400536 u32 value = 0;
537
Joe Perchesae9540f72010-02-09 11:49:52 +0000538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 "%s %s mask %s the routing reg.\n",
540 enable ? "Adding" : "Removing",
541 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 "(Bad index != RT_IDX)",
558 enable ? "to" : "from");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400559
560 switch (mask) {
561 case RT_IDX_CAM_HIT:
562 {
563 value = RT_IDX_DST_CAM_Q | /* dest */
564 RT_IDX_TYPE_NICQ | /* type */
565 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 break;
567 }
568 case RT_IDX_VALID: /* Promiscuous Mode frames. */
569 {
570 value = RT_IDX_DST_DFLT_Q | /* dest */
571 RT_IDX_TYPE_NICQ | /* type */
572 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573 break;
574 }
575 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
576 {
577 value = RT_IDX_DST_DFLT_Q | /* dest */
578 RT_IDX_TYPE_NICQ | /* type */
579 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580 break;
581 }
Ron Mercerfbc2ac32010-07-05 12:19:41 +0000582 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583 {
584 value = RT_IDX_DST_DFLT_Q | /* dest */
585 RT_IDX_TYPE_NICQ | /* type */
586 (RT_IDX_IP_CSUM_ERR_SLOT <<
587 RT_IDX_IDX_SHIFT); /* index */
588 break;
589 }
590 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591 {
592 value = RT_IDX_DST_DFLT_Q | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 RT_IDX_IDX_SHIFT); /* index */
596 break;
597 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400598 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
599 {
600 value = RT_IDX_DST_DFLT_Q | /* dest */
601 RT_IDX_TYPE_NICQ | /* type */
602 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603 break;
604 }
605 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
606 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000607 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400608 RT_IDX_TYPE_NICQ | /* type */
609 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610 break;
611 }
612 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
613 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000614 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400615 RT_IDX_TYPE_NICQ | /* type */
616 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617 break;
618 }
619 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
620 {
621 value = RT_IDX_DST_RSS | /* dest */
622 RT_IDX_TYPE_NICQ | /* type */
623 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624 break;
625 }
626 case 0: /* Clear the E-bit on an entry. */
627 {
628 value = RT_IDX_DST_DFLT_Q | /* dest */
629 RT_IDX_TYPE_NICQ | /* type */
630 (index << RT_IDX_IDX_SHIFT);/* index */
631 break;
632 }
633 default:
Joe Perchesae9540f72010-02-09 11:49:52 +0000634 netif_err(qdev, ifup, qdev->ndev,
635 "Mask type %d not yet supported.\n", mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400636 status = -EPERM;
637 goto exit;
638 }
639
640 if (value) {
641 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642 if (status)
643 goto exit;
644 value |= (enable ? RT_IDX_E : 0);
645 ql_write32(qdev, RT_IDX, value);
646 ql_write32(qdev, RT_DATA, enable ? mask : 0);
647 }
648exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400649 return status;
650}
651
652static void ql_enable_interrupts(struct ql_adapter *qdev)
653{
654 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655}
656
657static void ql_disable_interrupts(struct ql_adapter *qdev)
658{
659 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660}
661
662/* If we're running with multiple MSI-X vectors then we enable on the fly.
663 * Otherwise, we may have multiple outstanding workers and don't want to
664 * enable until the last one finishes. In this case, the irq_cnt gets
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300665 * incremented every time we queue a worker and decremented every time
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400666 * a worker finishes. Once it hits zero we enable the interrupt.
667 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700668u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400669{
Ron Mercerbb0d2152008-10-20 10:30:26 -0700670 u32 var = 0;
671 unsigned long hw_flags = 0;
672 struct intr_context *ctx = qdev->intr_context + intr;
673
674 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 /* Always enable if we're MSIX multi interrupts and
676 * it's not the default (zeroeth) interrupt.
677 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400678 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700679 ctx->intr_en_mask);
680 var = ql_read32(qdev, STS);
681 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400682 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700683
684 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 ql_write32(qdev, INTR_EN,
687 ctx->intr_en_mask);
688 var = ql_read32(qdev, STS);
689 }
690 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400692}
693
694static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695{
696 u32 var = 0;
Ron Mercerbb0d2152008-10-20 10:30:26 -0700697 struct intr_context *ctx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400698
Ron Mercerbb0d2152008-10-20 10:30:26 -0700699 /* HW disables for us if we're MSIX multi interrupts and
700 * it's not the default (zeroeth) interrupt.
701 */
702 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703 return 0;
704
705 ctx = qdev->intr_context + intr;
Ron Mercer08b1bc82009-03-09 10:59:23 +0000706 spin_lock(&qdev->hw_lock);
Ron Mercerbb0d2152008-10-20 10:30:26 -0700707 if (!atomic_read(&ctx->irq_cnt)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400708 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700709 ctx->intr_dis_mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400710 var = ql_read32(qdev, STS);
711 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700712 atomic_inc(&ctx->irq_cnt);
Ron Mercer08b1bc82009-03-09 10:59:23 +0000713 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400714 return var;
715}
716
717static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718{
719 int i;
720 for (i = 0; i < qdev->intr_count; i++) {
721 /* The enable call does a atomic_dec_and_test
722 * and enables only if the result is zero.
723 * So we precharge it here.
724 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700725 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726 i == 0))
727 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400728 ql_enable_completion_interrupt(qdev, i);
729 }
730
731}
732
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000733static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734{
735 int status, i;
736 u16 csum = 0;
737 __le16 *flash = (__le16 *)&qdev->flash;
738
739 status = strncmp((char *)&qdev->flash, str, 4);
740 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000741 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000742 return status;
743 }
744
745 for (i = 0; i < size; i++)
746 csum += le16_to_cpu(*flash++);
747
748 if (csum)
Joe Perchesae9540f72010-02-09 11:49:52 +0000749 netif_err(qdev, ifup, qdev->ndev,
750 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000751
752 return csum;
753}
754
Ron Mercer26351472009-02-02 13:53:57 -0800755static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400756{
757 int status = 0;
758 /* wait for reg to come ready */
759 status = ql_wait_reg_rdy(qdev,
760 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761 if (status)
762 goto exit;
763 /* set up for reg read */
764 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 /* wait for reg to come ready */
766 status = ql_wait_reg_rdy(qdev,
767 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768 if (status)
769 goto exit;
Ron Mercer26351472009-02-02 13:53:57 -0800770 /* This data is stored on flash as an array of
771 * __le32. Since ql_read32() returns cpu endian
772 * we need to swap it back.
773 */
774 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400775exit:
776 return status;
777}
778
Ron Mercercdca8d02009-03-02 08:07:31 +0000779static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780{
781 u32 i, size;
782 int status;
783 __le32 *p = (__le32 *)&qdev->flash;
784 u32 offset;
Ron Mercer542512e2009-06-09 05:39:33 +0000785 u8 mac_addr[6];
Ron Mercercdca8d02009-03-02 08:07:31 +0000786
787 /* Get flash offset for function and adjust
788 * for dword access.
789 */
Ron Mercere4552f52009-06-09 05:39:32 +0000790 if (!qdev->port)
Ron Mercercdca8d02009-03-02 08:07:31 +0000791 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792 else
793 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794
795 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796 return -ETIMEDOUT;
797
798 size = sizeof(struct flash_params_8000) / sizeof(u32);
799 for (i = 0; i < size; i++, p++) {
800 status = ql_read_flash_word(qdev, i+offset, p);
801 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000802 netif_err(qdev, ifup, qdev->ndev,
803 "Error reading flash.\n");
Ron Mercercdca8d02009-03-02 08:07:31 +0000804 goto exit;
805 }
806 }
807
808 status = ql_validate_flash(qdev,
809 sizeof(struct flash_params_8000) / sizeof(u16),
810 "8000");
811 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000812 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
Ron Mercercdca8d02009-03-02 08:07:31 +0000813 status = -EINVAL;
814 goto exit;
815 }
816
Ron Mercer542512e2009-06-09 05:39:33 +0000817 /* Extract either manufacturer or BOFM modified
818 * MAC address.
819 */
820 if (qdev->flash.flash_params_8000.data_type1 == 2)
821 memcpy(mac_addr,
822 qdev->flash.flash_params_8000.mac_addr1,
823 qdev->ndev->addr_len);
824 else
825 memcpy(mac_addr,
826 qdev->flash.flash_params_8000.mac_addr,
827 qdev->ndev->addr_len);
828
829 if (!is_valid_ether_addr(mac_addr)) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000830 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
Ron Mercercdca8d02009-03-02 08:07:31 +0000831 status = -EINVAL;
832 goto exit;
833 }
834
835 memcpy(qdev->ndev->dev_addr,
Ron Mercer542512e2009-06-09 05:39:33 +0000836 mac_addr,
Ron Mercercdca8d02009-03-02 08:07:31 +0000837 qdev->ndev->addr_len);
838
839exit:
840 ql_sem_unlock(qdev, SEM_FLASH_MASK);
841 return status;
842}
843
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000844static int ql_get_8012_flash_params(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400845{
846 int i;
847 int status;
Ron Mercer26351472009-02-02 13:53:57 -0800848 __le32 *p = (__le32 *)&qdev->flash;
Ron Mercere78f5fa72009-02-02 13:54:15 -0800849 u32 offset = 0;
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000850 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
Ron Mercere78f5fa72009-02-02 13:54:15 -0800851
852 /* Second function's parameters follow the first
853 * function's.
854 */
Ron Mercere4552f52009-06-09 05:39:32 +0000855 if (qdev->port)
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000856 offset = size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400857
858 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859 return -ETIMEDOUT;
860
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000861 for (i = 0; i < size; i++, p++) {
Ron Mercere78f5fa72009-02-02 13:54:15 -0800862 status = ql_read_flash_word(qdev, i+offset, p);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400863 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000864 netif_err(qdev, ifup, qdev->ndev,
865 "Error reading flash.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400866 goto exit;
867 }
868
869 }
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000870
871 status = ql_validate_flash(qdev,
872 sizeof(struct flash_params_8012) / sizeof(u16),
873 "8012");
874 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000875 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000876 status = -EINVAL;
877 goto exit;
878 }
879
880 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881 status = -EINVAL;
882 goto exit;
883 }
884
885 memcpy(qdev->ndev->dev_addr,
886 qdev->flash.flash_params_8012.mac_addr,
887 qdev->ndev->addr_len);
888
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400889exit:
890 ql_sem_unlock(qdev, SEM_FLASH_MASK);
891 return status;
892}
893
894/* xgmac register are located behind the xgmac_addr and xgmac_data
895 * register pair. Each read/write requires us to wait for the ready
896 * bit before reading/writing the data.
897 */
898static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899{
900 int status;
901 /* wait for reg to come ready */
902 status = ql_wait_reg_rdy(qdev,
903 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 if (status)
905 return status;
906 /* write the data to the data reg */
907 ql_write32(qdev, XGMAC_DATA, data);
908 /* trigger the write */
909 ql_write32(qdev, XGMAC_ADDR, reg);
910 return status;
911}
912
913/* xgmac register are located behind the xgmac_addr and xgmac_data
914 * register pair. Each read/write requires us to wait for the ready
915 * bit before reading/writing the data.
916 */
917int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918{
919 int status = 0;
920 /* wait for reg to come ready */
921 status = ql_wait_reg_rdy(qdev,
922 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923 if (status)
924 goto exit;
925 /* set up for reg read */
926 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 /* wait for reg to come ready */
928 status = ql_wait_reg_rdy(qdev,
929 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930 if (status)
931 goto exit;
932 /* get the data */
933 *data = ql_read32(qdev, XGMAC_DATA);
934exit:
935 return status;
936}
937
938/* This is used for reading the 64-bit statistics regs. */
939int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940{
941 int status = 0;
942 u32 hi = 0;
943 u32 lo = 0;
944
945 status = ql_read_xgmac_reg(qdev, reg, &lo);
946 if (status)
947 goto exit;
948
949 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950 if (status)
951 goto exit;
952
953 *data = (u64) lo | ((u64) hi << 32);
954
955exit:
956 return status;
957}
958
Ron Mercercdca8d02009-03-02 08:07:31 +0000959static int ql_8000_port_initialize(struct ql_adapter *qdev)
960{
Ron Mercerbcc2cb32009-03-02 08:07:32 +0000961 int status;
Ron Mercercfec0cb2009-06-09 05:39:29 +0000962 /*
963 * Get MPI firmware version for driver banner
964 * and ethool info.
965 */
966 status = ql_mb_about_fw(qdev);
967 if (status)
968 goto exit;
Ron Mercerbcc2cb32009-03-02 08:07:32 +0000969 status = ql_mb_get_fw_state(qdev);
970 if (status)
971 goto exit;
972 /* Wake up a worker to get/set the TX/RX frame sizes. */
973 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974exit:
975 return status;
Ron Mercercdca8d02009-03-02 08:07:31 +0000976}
977
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400978/* Take the MAC Core out of reset.
979 * Enable statistics counting.
980 * Take the transmitter/receiver out of reset.
981 * This functionality may be done in the MPI firmware at a
982 * later date.
983 */
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000984static int ql_8012_port_initialize(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400985{
986 int status = 0;
987 u32 data;
988
989 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 /* Another function has the semaphore, so
991 * wait for the port init bit to come ready.
992 */
Joe Perchesae9540f72010-02-09 11:49:52 +0000993 netif_info(qdev, link, qdev->ndev,
994 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400995 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +0000997 netif_crit(qdev, link, qdev->ndev,
998 "Port initialize timed out.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400999 }
1000 return status;
1001 }
1002
Joe Perchesae9540f72010-02-09 11:49:52 +00001003 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001004 /* Set the core reset. */
1005 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006 if (status)
1007 goto end;
1008 data |= GLOBAL_CFG_RESET;
1009 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010 if (status)
1011 goto end;
1012
1013 /* Clear the core reset and turn on jumbo for receiver. */
1014 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1015 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1016 data |= GLOBAL_CFG_TX_STAT_EN;
1017 data |= GLOBAL_CFG_RX_STAT_EN;
1018 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019 if (status)
1020 goto end;
1021
1022 /* Enable transmitter, and clear it's reset. */
1023 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024 if (status)
1025 goto end;
1026 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1027 data |= TX_CFG_EN; /* Enable the transmitter. */
1028 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029 if (status)
1030 goto end;
1031
1032 /* Enable receiver and clear it's reset. */
1033 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034 if (status)
1035 goto end;
1036 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1037 data |= RX_CFG_EN; /* Enable the receiver. */
1038 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039 if (status)
1040 goto end;
1041
1042 /* Turn on jumbo. */
1043 status =
1044 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045 if (status)
1046 goto end;
1047 status =
1048 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049 if (status)
1050 goto end;
1051
1052 /* Signal to the world that the port is enabled. */
1053 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054end:
1055 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056 return status;
1057}
1058
Ron Mercer7c734352009-10-19 03:32:19 +00001059static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060{
1061 return PAGE_SIZE << qdev->lbq_buf_order;
1062}
1063
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001064/* Get the next large buffer. */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001065static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001066{
1067 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068 rx_ring->lbq_curr_idx++;
1069 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070 rx_ring->lbq_curr_idx = 0;
1071 rx_ring->lbq_free_cnt++;
1072 return lbq_desc;
1073}
1074
Ron Mercer7c734352009-10-19 03:32:19 +00001075static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 struct rx_ring *rx_ring)
1077{
1078 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079
1080 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001081 dma_unmap_addr(lbq_desc, mapaddr),
Ron Mercer7c734352009-10-19 03:32:19 +00001082 rx_ring->lbq_buf_size,
1083 PCI_DMA_FROMDEVICE);
1084
1085 /* If it's the last chunk of our master page then
1086 * we unmap it.
1087 */
1088 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 == ql_lbq_block_size(qdev))
1090 pci_unmap_page(qdev->pdev,
1091 lbq_desc->p.pg_chunk.map,
1092 ql_lbq_block_size(qdev),
1093 PCI_DMA_FROMDEVICE);
1094 return lbq_desc;
1095}
1096
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001097/* Get the next small buffer. */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001098static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001099{
1100 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101 rx_ring->sbq_curr_idx++;
1102 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103 rx_ring->sbq_curr_idx = 0;
1104 rx_ring->sbq_free_cnt++;
1105 return sbq_desc;
1106}
1107
1108/* Update an rx ring index. */
1109static void ql_update_cq(struct rx_ring *rx_ring)
1110{
1111 rx_ring->cnsmr_idx++;
1112 rx_ring->curr_entry++;
1113 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114 rx_ring->cnsmr_idx = 0;
1115 rx_ring->curr_entry = rx_ring->cq_base;
1116 }
1117}
1118
1119static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120{
1121 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122}
1123
Ron Mercer7c734352009-10-19 03:32:19 +00001124static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 struct bq_desc *lbq_desc)
1126{
1127 if (!rx_ring->pg_chunk.page) {
1128 u64 map;
1129 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130 GFP_ATOMIC,
1131 qdev->lbq_buf_order);
1132 if (unlikely(!rx_ring->pg_chunk.page)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001133 netif_err(qdev, drv, qdev->ndev,
1134 "page allocation failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00001135 return -ENOMEM;
1136 }
1137 rx_ring->pg_chunk.offset = 0;
1138 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 0, ql_lbq_block_size(qdev),
1140 PCI_DMA_FROMDEVICE);
1141 if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 __free_pages(rx_ring->pg_chunk.page,
1143 qdev->lbq_buf_order);
Joe Perchesae9540f72010-02-09 11:49:52 +00001144 netif_err(qdev, drv, qdev->ndev,
1145 "PCI mapping failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00001146 return -ENOMEM;
1147 }
1148 rx_ring->pg_chunk.map = map;
1149 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150 }
1151
1152 /* Copy the current master pg_chunk info
1153 * to the current descriptor.
1154 */
1155 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156
1157 /* Adjust the master page chunk for next
1158 * buffer get.
1159 */
1160 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 rx_ring->pg_chunk.page = NULL;
1163 lbq_desc->p.pg_chunk.last_flag = 1;
1164 } else {
1165 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 get_page(rx_ring->pg_chunk.page);
1167 lbq_desc->p.pg_chunk.last_flag = 0;
1168 }
1169 return 0;
1170}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001171/* Process (refill) a large buffer queue. */
1172static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173{
Ron Mercer49f21862009-02-23 10:42:16 +00001174 u32 clean_idx = rx_ring->lbq_clean_idx;
1175 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001176 struct bq_desc *lbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001177 u64 map;
1178 int i;
1179
Ron Mercer7c734352009-10-19 03:32:19 +00001180 while (rx_ring->lbq_free_cnt > 32) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001181 for (i = 0; i < 16; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 "lbq: try cleaning clean_idx = %d.\n",
1184 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001185 lbq_desc = &rx_ring->lbq[clean_idx];
Ron Mercer7c734352009-10-19 03:32:19 +00001186 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001187 netif_err(qdev, ifup, qdev->ndev,
1188 "Could not get a page chunk.\n");
1189 return;
1190 }
Ron Mercer7c734352009-10-19 03:32:19 +00001191
1192 map = lbq_desc->p.pg_chunk.map +
1193 lbq_desc->p.pg_chunk.offset;
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001194 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1195 dma_unmap_len_set(lbq_desc, maplen,
Ron Mercer7c734352009-10-19 03:32:19 +00001196 rx_ring->lbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001197 *lbq_desc->addr = cpu_to_le64(map);
Ron Mercer7c734352009-10-19 03:32:19 +00001198
1199 pci_dma_sync_single_for_device(qdev->pdev, map,
1200 rx_ring->lbq_buf_size,
1201 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001202 clean_idx++;
1203 if (clean_idx == rx_ring->lbq_len)
1204 clean_idx = 0;
1205 }
1206
1207 rx_ring->lbq_clean_idx = clean_idx;
1208 rx_ring->lbq_prod_idx += 16;
1209 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 rx_ring->lbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001211 rx_ring->lbq_free_cnt -= 16;
1212 }
1213
1214 if (start_idx != clean_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 "lbq: updating prod idx = %d.\n",
1217 rx_ring->lbq_prod_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001218 ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 rx_ring->lbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001220 }
1221}
1222
1223/* Process (refill) a small buffer queue. */
1224static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225{
Ron Mercer49f21862009-02-23 10:42:16 +00001226 u32 clean_idx = rx_ring->sbq_clean_idx;
1227 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001228 struct bq_desc *sbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001229 u64 map;
1230 int i;
1231
1232 while (rx_ring->sbq_free_cnt > 16) {
1233 for (i = 0; i < 16; i++) {
1234 sbq_desc = &rx_ring->sbq[clean_idx];
Joe Perchesae9540f72010-02-09 11:49:52 +00001235 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 "sbq: try cleaning clean_idx = %d.\n",
1237 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001238 if (sbq_desc->p.skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001239 netif_printk(qdev, rx_status, KERN_DEBUG,
1240 qdev->ndev,
1241 "sbq: getting new skb for index %d.\n",
1242 sbq_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001243 sbq_desc->p.skb =
1244 netdev_alloc_skb(qdev->ndev,
Ron Mercer52e55f32009-10-10 09:35:07 +00001245 SMALL_BUFFER_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001246 if (sbq_desc->p.skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001247 netif_err(qdev, probe, qdev->ndev,
1248 "Couldn't get an skb.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001249 rx_ring->sbq_clean_idx = clean_idx;
1250 return;
1251 }
1252 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253 map = pci_map_single(qdev->pdev,
1254 sbq_desc->p.skb->data,
Ron Mercer52e55f32009-10-10 09:35:07 +00001255 rx_ring->sbq_buf_size,
1256 PCI_DMA_FROMDEVICE);
Ron Mercerc907a352009-01-04 17:06:46 -08001257 if (pci_dma_mapping_error(qdev->pdev, map)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001258 netif_err(qdev, ifup, qdev->ndev,
1259 "PCI mapping failed.\n");
Ron Mercerc907a352009-01-04 17:06:46 -08001260 rx_ring->sbq_clean_idx = clean_idx;
Ron Mercer06a3d512009-02-12 16:37:48 -08001261 dev_kfree_skb_any(sbq_desc->p.skb);
1262 sbq_desc->p.skb = NULL;
Ron Mercerc907a352009-01-04 17:06:46 -08001263 return;
1264 }
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001265 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 dma_unmap_len_set(sbq_desc, maplen,
Ron Mercer52e55f32009-10-10 09:35:07 +00001267 rx_ring->sbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001268 *sbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001269 }
1270
1271 clean_idx++;
1272 if (clean_idx == rx_ring->sbq_len)
1273 clean_idx = 0;
1274 }
1275 rx_ring->sbq_clean_idx = clean_idx;
1276 rx_ring->sbq_prod_idx += 16;
1277 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 rx_ring->sbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001279 rx_ring->sbq_free_cnt -= 16;
1280 }
1281
1282 if (start_idx != clean_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001283 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 "sbq: updating prod idx = %d.\n",
1285 rx_ring->sbq_prod_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001286 ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 rx_ring->sbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001288 }
1289}
1290
1291static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1293{
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1296}
1297
1298/* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1300 */
1301static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1303{
1304 int i;
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1307 /*
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If its the zeroeth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there is more than 6 frags,
1314 * then its an OAL.
1315 */
1316 if (i == 7) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001317 netif_printk(qdev, tx_done, KERN_DEBUG,
1318 qdev->ndev,
1319 "unmapping OAL area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001320 }
1321 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001322 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001323 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001324 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001325 maplen),
1326 PCI_DMA_TODEVICE);
1327 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001330 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001331 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001332 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001333 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001334 maplen), PCI_DMA_TODEVICE);
1335 }
1336 }
1337
1338}
1339
1340/* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342 */
1343static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346{
1347 int len = skb_headlen(skb);
1348 dma_addr_t map;
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353 if (frag_cnt) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001356 }
1357 /*
1358 * Map the skb buffer first.
1359 */
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1363 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001366
1367 return NETDEV_TX_BUSY;
1368 }
1369
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001374 map_idx++;
1375
1376 /*
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1381 * in this list.
1382 */
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385 tbd++;
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1389 * look like this:
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1403 * etc...
1404 */
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407 sizeof(struct oal),
1408 PCI_DMA_TODEVICE);
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1410 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1413 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001414 goto map_error;
1415 }
1416
1417 tbd->addr = cpu_to_le64(map);
1418 /*
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1422 */
1423 tbd->len =
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001427 map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431 map_idx++;
1432 }
1433
1434 map =
1435 pci_map_page(qdev->pdev, frag->page,
1436 frag->page_offset, frag->size,
1437 PCI_DMA_TODEVICE);
1438
1439 err = pci_dma_mapping_error(qdev->pdev, map);
1440 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001441 netif_err(qdev, tx_queued, qdev->ndev,
1442 "PCI mapping frags failed with error: %d.\n",
1443 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001444 goto map_error;
1445 }
1446
1447 tbd->addr = cpu_to_le64(map);
1448 tbd->len = cpu_to_le32(frag->size);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001449 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1450 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001451 frag->size);
1452
1453 }
1454 /* Save the number of segments we've mapped. */
1455 tx_ring_desc->map_cnt = map_idx;
1456 /* Terminate the last segment. */
1457 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1458 return NETDEV_TX_OK;
1459
1460map_error:
1461 /*
1462 * If the first frag mapping failed, then i will be zero.
1463 * This causes the unmap of the skb->data area. Otherwise
1464 * we pass in the number of frags that mapped successfully
1465 * so they can be umapped.
1466 */
1467 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1468 return NETDEV_TX_BUSY;
1469}
1470
Ron Mercer4f848c02010-01-02 10:37:43 +00001471/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001472static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1473 struct rx_ring *rx_ring,
1474 struct ib_mac_iocb_rsp *ib_mac_rsp,
1475 u32 length,
1476 u16 vlan_id)
1477{
1478 struct sk_buff *skb;
1479 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1480 struct skb_frag_struct *rx_frag;
1481 int nr_frags;
1482 struct napi_struct *napi = &rx_ring->napi;
1483
1484 napi->dev = qdev->ndev;
1485
1486 skb = napi_get_frags(napi);
1487 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001488 netif_err(qdev, drv, qdev->ndev,
1489 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001490 rx_ring->rx_dropped++;
1491 put_page(lbq_desc->p.pg_chunk.page);
1492 return;
1493 }
1494 prefetch(lbq_desc->p.pg_chunk.va);
1495 rx_frag = skb_shinfo(skb)->frags;
1496 nr_frags = skb_shinfo(skb)->nr_frags;
1497 rx_frag += nr_frags;
1498 rx_frag->page = lbq_desc->p.pg_chunk.page;
1499 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1500 rx_frag->size = length;
1501
1502 skb->len += length;
1503 skb->data_len += length;
1504 skb->truesize += length;
1505 skb_shinfo(skb)->nr_frags++;
1506
1507 rx_ring->rx_packets++;
1508 rx_ring->rx_bytes += length;
1509 skb->ip_summed = CHECKSUM_UNNECESSARY;
1510 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001511 if (vlan_id != 0xffff)
1512 __vlan_hwaccel_put_tag(skb, vlan_id);
1513 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001514}
1515
1516/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001517static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1518 struct rx_ring *rx_ring,
1519 struct ib_mac_iocb_rsp *ib_mac_rsp,
1520 u32 length,
1521 u16 vlan_id)
1522{
1523 struct net_device *ndev = qdev->ndev;
1524 struct sk_buff *skb = NULL;
1525 void *addr;
1526 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527 struct napi_struct *napi = &rx_ring->napi;
1528
1529 skb = netdev_alloc_skb(ndev, length);
1530 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001531 netif_err(qdev, drv, qdev->ndev,
1532 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001533 rx_ring->rx_dropped++;
1534 put_page(lbq_desc->p.pg_chunk.page);
1535 return;
1536 }
1537
1538 addr = lbq_desc->p.pg_chunk.va;
1539 prefetch(addr);
1540
1541
1542 /* Frame error, so drop the packet. */
1543 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001544 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001545 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001546 rx_ring->rx_errors++;
1547 goto err_out;
1548 }
1549
1550 /* The max framesize filter on this chip is set higher than
1551 * MTU since FCoE uses 2k frames.
1552 */
1553 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001554 netif_err(qdev, drv, qdev->ndev,
1555 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001556 rx_ring->rx_dropped++;
1557 goto err_out;
1558 }
1559 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001560 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1561 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001563 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1564 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565 length-ETH_HLEN);
1566 skb->len += length-ETH_HLEN;
1567 skb->data_len += length-ETH_HLEN;
1568 skb->truesize += length-ETH_HLEN;
1569
1570 rx_ring->rx_packets++;
1571 rx_ring->rx_bytes += skb->len;
1572 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001573 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001574
Michał Mirosław88230fd2011-04-18 13:31:21 +00001575 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001576 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577 /* TCP frame. */
1578 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001579 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1580 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001581 skb->ip_summed = CHECKSUM_UNNECESSARY;
1582 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1583 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1584 /* Unfragmented ipv4 UDP frame. */
1585 struct iphdr *iph = (struct iphdr *) skb->data;
1586 if (!(iph->frag_off &
1587 cpu_to_be16(IP_MF|IP_OFFSET))) {
1588 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001589 netif_printk(qdev, rx_status, KERN_DEBUG,
1590 qdev->ndev,
1591 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001592 }
1593 }
1594 }
1595
1596 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001597 if (vlan_id != 0xffff)
1598 __vlan_hwaccel_put_tag(skb, vlan_id);
1599 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1600 napi_gro_receive(napi, skb);
1601 else
1602 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001603 return;
1604err_out:
1605 dev_kfree_skb_any(skb);
1606 put_page(lbq_desc->p.pg_chunk.page);
1607}
1608
1609/* Process an inbound completion from an rx ring. */
1610static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1611 struct rx_ring *rx_ring,
1612 struct ib_mac_iocb_rsp *ib_mac_rsp,
1613 u32 length,
1614 u16 vlan_id)
1615{
1616 struct net_device *ndev = qdev->ndev;
1617 struct sk_buff *skb = NULL;
1618 struct sk_buff *new_skb = NULL;
1619 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1620
1621 skb = sbq_desc->p.skb;
1622 /* Allocate new_skb and copy */
1623 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1624 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001625 netif_err(qdev, probe, qdev->ndev,
1626 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001627 rx_ring->rx_dropped++;
1628 return;
1629 }
1630 skb_reserve(new_skb, NET_IP_ALIGN);
1631 memcpy(skb_put(new_skb, length), skb->data, length);
1632 skb = new_skb;
1633
1634 /* Frame error, so drop the packet. */
1635 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001636 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001637 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001638 dev_kfree_skb_any(skb);
1639 rx_ring->rx_errors++;
1640 return;
1641 }
1642
1643 /* loopback self test for ethtool */
1644 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1645 ql_check_lb_frame(qdev, skb);
1646 dev_kfree_skb_any(skb);
1647 return;
1648 }
1649
1650 /* The max framesize filter on this chip is set higher than
1651 * MTU since FCoE uses 2k frames.
1652 */
1653 if (skb->len > ndev->mtu + ETH_HLEN) {
1654 dev_kfree_skb_any(skb);
1655 rx_ring->rx_dropped++;
1656 return;
1657 }
1658
1659 prefetch(skb->data);
1660 skb->dev = ndev;
1661 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001662 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1663 "%s Multicast.\n",
1664 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1665 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1666 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1667 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1668 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001670 }
1671 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001672 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001674
1675 rx_ring->rx_packets++;
1676 rx_ring->rx_bytes += skb->len;
1677 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001678 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001679
1680 /* If rx checksum is on, and there are no
1681 * csum or frame errors.
1682 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001683 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001684 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1685 /* TCP frame. */
1686 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001687 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1688 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001689 skb->ip_summed = CHECKSUM_UNNECESSARY;
1690 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1691 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1692 /* Unfragmented ipv4 UDP frame. */
1693 struct iphdr *iph = (struct iphdr *) skb->data;
1694 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001695 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001696 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001697 netif_printk(qdev, rx_status, KERN_DEBUG,
1698 qdev->ndev,
1699 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001700 }
1701 }
1702 }
1703
1704 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001705 if (vlan_id != 0xffff)
1706 __vlan_hwaccel_put_tag(skb, vlan_id);
1707 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1708 napi_gro_receive(&rx_ring->napi, skb);
1709 else
1710 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001711}
1712
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001713static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001714{
1715 void *temp_addr = skb->data;
1716
1717 /* Undo the skb_reserve(skb,32) we did before
1718 * giving to hardware, and realign data on
1719 * a 2-byte boundary.
1720 */
1721 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1722 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1723 skb_copy_to_linear_data(skb, temp_addr,
1724 (unsigned int)len);
1725}
1726
1727/*
1728 * This function builds an skb for the given inbound
1729 * completion. It will be rewritten for readability in the near
1730 * future, but for not it works well.
1731 */
1732static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1733 struct rx_ring *rx_ring,
1734 struct ib_mac_iocb_rsp *ib_mac_rsp)
1735{
1736 struct bq_desc *lbq_desc;
1737 struct bq_desc *sbq_desc;
1738 struct sk_buff *skb = NULL;
1739 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1740 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1741
1742 /*
1743 * Handle the header buffer if present.
1744 */
1745 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1746 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001747 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001749 /*
1750 * Headers fit nicely into a small buffer.
1751 */
1752 sbq_desc = ql_get_curr_sbuf(rx_ring);
1753 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001754 dma_unmap_addr(sbq_desc, mapaddr),
1755 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001756 PCI_DMA_FROMDEVICE);
1757 skb = sbq_desc->p.skb;
1758 ql_realign_skb(skb, hdr_len);
1759 skb_put(skb, hdr_len);
1760 sbq_desc->p.skb = NULL;
1761 }
1762
1763 /*
1764 * Handle the data buffer(s).
1765 */
1766 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001767 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1768 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001769 return skb;
1770 }
1771
1772 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1773 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 "Headers in small, data of %d bytes in small, combine them.\n",
1776 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001777 /*
1778 * Data is less than small buffer size so it's
1779 * stuffed in a small buffer.
1780 * For this case we append the data
1781 * from the "data" small buffer to the "header" small
1782 * buffer.
1783 */
1784 sbq_desc = ql_get_curr_sbuf(rx_ring);
1785 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001786 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001787 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001788 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001789 (sbq_desc, maplen),
1790 PCI_DMA_FROMDEVICE);
1791 memcpy(skb_put(skb, length),
1792 sbq_desc->p.skb->data, length);
1793 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001794 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001795 (sbq_desc,
1796 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001797 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001798 (sbq_desc,
1799 maplen),
1800 PCI_DMA_FROMDEVICE);
1801 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001802 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1803 "%d bytes in a single small buffer.\n",
1804 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 sbq_desc = ql_get_curr_sbuf(rx_ring);
1806 skb = sbq_desc->p.skb;
1807 ql_realign_skb(skb, length);
1808 skb_put(skb, length);
1809 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001810 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001811 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001812 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001813 maplen),
1814 PCI_DMA_FROMDEVICE);
1815 sbq_desc->p.skb = NULL;
1816 }
1817 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1818 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "Header in small, %d bytes in large. Chain large to small!\n",
1821 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001822 /*
1823 * The data is in a single large buffer. We
1824 * chain it to the header buffer's skb and let
1825 * it rip.
1826 */
Ron Mercer7c734352009-10-19 03:32:19 +00001827 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001828 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829 "Chaining page at offset = %d, for %d bytes to skb.\n",
1830 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001831 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1832 lbq_desc->p.pg_chunk.offset,
1833 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001834 skb->len += length;
1835 skb->data_len += length;
1836 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001837 } else {
1838 /*
1839 * The headers and data are in a single large buffer. We
1840 * copy it to a new skb and let it go. This can happen with
1841 * jumbo mtu on a non-TCP/UDP frame.
1842 */
Ron Mercer7c734352009-10-19 03:32:19 +00001843 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001844 skb = netdev_alloc_skb(qdev->ndev, length);
1845 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001846 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1847 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001848 return NULL;
1849 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001850 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001851 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001852 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001853 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001854 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001855 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001856 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1858 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001859 skb_fill_page_desc(skb, 0,
1860 lbq_desc->p.pg_chunk.page,
1861 lbq_desc->p.pg_chunk.offset,
1862 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001863 skb->len += length;
1864 skb->data_len += length;
1865 skb->truesize += length;
1866 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001867 __pskb_pull_tail(skb,
1868 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1869 VLAN_ETH_HLEN : ETH_HLEN);
1870 }
1871 } else {
1872 /*
1873 * The data is in a chain of large buffers
1874 * pointed to by a small buffer. We loop
1875 * thru and chain them to the our small header
1876 * buffer's skb.
1877 * frags: There are 18 max frags and our small
1878 * buffer will hold 32 of them. The thing is,
1879 * we'll use 3 max for our 9000 byte jumbo
1880 * frames. If the MTU goes up we could
1881 * eventually be in trouble.
1882 */
Ron Mercer7c734352009-10-19 03:32:19 +00001883 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001884 sbq_desc = ql_get_curr_sbuf(rx_ring);
1885 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001886 dma_unmap_addr(sbq_desc, mapaddr),
1887 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001888 PCI_DMA_FROMDEVICE);
1889 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1890 /*
1891 * This is an non TCP/UDP IP frame, so
1892 * the headers aren't split into a small
1893 * buffer. We have to use the small buffer
1894 * that contains our sg list as our skb to
1895 * send upstairs. Copy the sg list here to
1896 * a local buffer and use it to find the
1897 * pages to chain.
1898 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001899 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1900 "%d bytes of headers & data in chain of large.\n",
1901 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001902 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001903 sbq_desc->p.skb = NULL;
1904 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001905 }
1906 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001907 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1908 size = (length < rx_ring->lbq_buf_size) ? length :
1909 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001910
Joe Perchesae9540f72010-02-09 11:49:52 +00001911 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1912 "Adding page %d to skb for %d bytes.\n",
1913 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001914 skb_fill_page_desc(skb, i,
1915 lbq_desc->p.pg_chunk.page,
1916 lbq_desc->p.pg_chunk.offset,
1917 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001918 skb->len += size;
1919 skb->data_len += size;
1920 skb->truesize += size;
1921 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001922 i++;
1923 }
1924 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1925 VLAN_ETH_HLEN : ETH_HLEN);
1926 }
1927 return skb;
1928}
1929
1930/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001931static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001932 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001933 struct ib_mac_iocb_rsp *ib_mac_rsp,
1934 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001935{
1936 struct net_device *ndev = qdev->ndev;
1937 struct sk_buff *skb = NULL;
1938
1939 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1940
1941 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1942 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001943 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1944 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001945 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001946 return;
1947 }
1948
Ron Mercera32959c2009-06-09 05:39:27 +00001949 /* Frame error, so drop the packet. */
1950 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001951 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001952 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001953 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001954 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001955 return;
1956 }
Ron Mercerec33a492009-06-09 05:39:28 +00001957
1958 /* The max framesize filter on this chip is set higher than
1959 * MTU since FCoE uses 2k frames.
1960 */
1961 if (skb->len > ndev->mtu + ETH_HLEN) {
1962 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001963 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001964 return;
1965 }
1966
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001967 /* loopback self test for ethtool */
1968 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1969 ql_check_lb_frame(qdev, skb);
1970 dev_kfree_skb_any(skb);
1971 return;
1972 }
1973
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001974 prefetch(skb->data);
1975 skb->dev = ndev;
1976 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001977 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1978 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1979 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1980 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1981 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1982 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1983 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001984 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001985 }
1986 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001987 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1988 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001989 }
Ron Mercerd555f592009-03-09 10:59:19 +00001990
Ron Mercerd555f592009-03-09 10:59:19 +00001991 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001992 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001993
1994 /* If rx checksum is on, and there are no
1995 * csum or frame errors.
1996 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001997 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001998 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1999 /* TCP frame. */
2000 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002001 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2002 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002003 skb->ip_summed = CHECKSUM_UNNECESSARY;
2004 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2005 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2006 /* Unfragmented ipv4 UDP frame. */
2007 struct iphdr *iph = (struct iphdr *) skb->data;
2008 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002009 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002010 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002013 }
2014 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002015 }
Ron Mercerd555f592009-03-09 10:59:19 +00002016
Ron Mercer885ee392009-11-03 13:49:31 +00002017 rx_ring->rx_packets++;
2018 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002019 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002020 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2021 __vlan_hwaccel_put_tag(skb, vlan_id);
2022 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2023 napi_gro_receive(&rx_ring->napi, skb);
2024 else
2025 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002026}
2027
Ron Mercer4f848c02010-01-02 10:37:43 +00002028/* Process an inbound completion from an rx ring. */
2029static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2030 struct rx_ring *rx_ring,
2031 struct ib_mac_iocb_rsp *ib_mac_rsp)
2032{
2033 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2034 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2035 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2036 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2037
2038 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2039
2040 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2041 /* The data and headers are split into
2042 * separate buffers.
2043 */
2044 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2045 vlan_id);
2046 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2047 /* The data fit in a single small buffer.
2048 * Allocate a new skb, copy the data and
2049 * return the buffer to the free pool.
2050 */
2051 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2052 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002053 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2054 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2055 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2056 /* TCP packet in a page chunk that's been checksummed.
2057 * Tack it on to our GRO skb and let it go.
2058 */
2059 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2060 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002061 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2062 /* Non-TCP packet in a page chunk. Allocate an
2063 * skb, tack it on frags, and send it up.
2064 */
2065 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2066 length, vlan_id);
2067 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002068 /* Non-TCP/UDP large frames that span multiple buffers
2069 * can be processed corrrectly by the split frame logic.
2070 */
2071 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2072 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002073 }
2074
2075 return (unsigned long)length;
2076}
2077
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002078/* Process an outbound completion from an rx ring. */
2079static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2080 struct ob_mac_iocb_rsp *mac_rsp)
2081{
2082 struct tx_ring *tx_ring;
2083 struct tx_ring_desc *tx_ring_desc;
2084
2085 QL_DUMP_OB_MAC_RSP(mac_rsp);
2086 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2087 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2088 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002089 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2090 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002091 dev_kfree_skb(tx_ring_desc->skb);
2092 tx_ring_desc->skb = NULL;
2093
2094 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2095 OB_MAC_IOCB_RSP_S |
2096 OB_MAC_IOCB_RSP_L |
2097 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2098 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002099 netif_warn(qdev, tx_done, qdev->ndev,
2100 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002101 }
2102 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002103 netif_warn(qdev, tx_done, qdev->ndev,
2104 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002105 }
2106 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002107 netif_warn(qdev, tx_done, qdev->ndev,
2108 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002109 }
2110 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002111 netif_warn(qdev, tx_done, qdev->ndev,
2112 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002113 }
2114 }
2115 atomic_inc(&tx_ring->tx_count);
2116}
2117
2118/* Fire up a handler to reset the MPI processor. */
2119void ql_queue_fw_error(struct ql_adapter *qdev)
2120{
Ron Mercer6a473302009-07-02 06:06:12 +00002121 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2123}
2124
2125void ql_queue_asic_error(struct ql_adapter *qdev)
2126{
Ron Mercer6a473302009-07-02 06:06:12 +00002127 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002128 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002129 /* Clear adapter up bit to signal the recovery
2130 * process that it shouldn't kill the reset worker
2131 * thread
2132 */
2133 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002134 /* Set asic recovery bit to indicate reset process that we are
2135 * in fatal error recovery process rather than normal close
2136 */
2137 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002138 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2139}
2140
2141static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2142 struct ib_ae_iocb_rsp *ib_ae_rsp)
2143{
2144 switch (ib_ae_rsp->event) {
2145 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002146 netif_err(qdev, rx_err, qdev->ndev,
2147 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002148 ql_queue_fw_error(qdev);
2149 return;
2150
2151 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002152 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2153 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002154 ql_queue_asic_error(qdev);
2155 return;
2156
2157 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002158 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002159 ql_queue_asic_error(qdev);
2160 break;
2161
2162 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002163 netdev_err(qdev->ndev, "PCI error occurred when reading "
2164 "anonymous buffers from rx_ring %d.\n",
2165 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 ql_queue_asic_error(qdev);
2167 break;
2168
2169 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002170 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2171 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002172 ql_queue_asic_error(qdev);
2173 break;
2174 }
2175}
2176
2177static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2178{
2179 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002180 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002181 struct ob_mac_iocb_rsp *net_rsp = NULL;
2182 int count = 0;
2183
Ron Mercer1e213302009-03-09 10:59:21 +00002184 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002185 /* While there are entries in the completion queue. */
2186 while (prod != rx_ring->cnsmr_idx) {
2187
Joe Perchesae9540f72010-02-09 11:49:52 +00002188 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2189 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2190 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002191
2192 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2193 rmb();
2194 switch (net_rsp->opcode) {
2195
2196 case OPCODE_OB_MAC_TSO_IOCB:
2197 case OPCODE_OB_MAC_IOCB:
2198 ql_process_mac_tx_intr(qdev, net_rsp);
2199 break;
2200 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002201 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2202 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2203 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002204 }
2205 count++;
2206 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002207 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002208 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002209 if (!net_rsp)
2210 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002211 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002212 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002213 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002214 if (atomic_read(&tx_ring->queue_stopped) &&
2215 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2216 /*
2217 * The queue got stopped because the tx_ring was full.
2218 * Wake it up, because it's now at least 25% empty.
2219 */
Ron Mercer1e213302009-03-09 10:59:21 +00002220 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002221 }
2222
2223 return count;
2224}
2225
2226static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2227{
2228 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002229 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002230 struct ql_net_rsp_iocb *net_rsp;
2231 int count = 0;
2232
2233 /* While there are entries in the completion queue. */
2234 while (prod != rx_ring->cnsmr_idx) {
2235
Joe Perchesae9540f72010-02-09 11:49:52 +00002236 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2237 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2238 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002239
2240 net_rsp = rx_ring->curr_entry;
2241 rmb();
2242 switch (net_rsp->opcode) {
2243 case OPCODE_IB_MAC_IOCB:
2244 ql_process_mac_rx_intr(qdev, rx_ring,
2245 (struct ib_mac_iocb_rsp *)
2246 net_rsp);
2247 break;
2248
2249 case OPCODE_IB_AE_IOCB:
2250 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2251 net_rsp);
2252 break;
2253 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002254 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2255 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2256 net_rsp->opcode);
2257 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002258 }
2259 count++;
2260 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002261 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002262 if (count == budget)
2263 break;
2264 }
2265 ql_update_buffer_queues(qdev, rx_ring);
2266 ql_write_cq_idx(rx_ring);
2267 return count;
2268}
2269
2270static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2271{
2272 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2273 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002274 struct rx_ring *trx_ring;
2275 int i, work_done = 0;
2276 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002277
Joe Perchesae9540f72010-02-09 11:49:52 +00002278 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002280
Ron Mercer39aa8162009-08-27 11:02:11 +00002281 /* Service the TX rings first. They start
2282 * right after the RSS rings. */
2283 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2284 trx_ring = &qdev->rx_ring[i];
2285 /* If this TX completion ring belongs to this vector and
2286 * it's not empty then service it.
2287 */
2288 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2289 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2290 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002291 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2292 "%s: Servicing TX completion ring %d.\n",
2293 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002294 ql_clean_outbound_rx_ring(trx_ring);
2295 }
2296 }
2297
2298 /*
2299 * Now service the RSS ring if it's active.
2300 */
2301 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2302 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002303 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2304 "%s: Servicing RX completion ring %d.\n",
2305 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002306 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2307 }
2308
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002309 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002310 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002311 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2312 }
2313 return work_done;
2314}
2315
Jiri Pirko18c49b92011-07-21 03:24:11 +00002316static void qlge_vlan_mode(struct net_device *ndev, u32 features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002317{
2318 struct ql_adapter *qdev = netdev_priv(ndev);
2319
Jiri Pirko18c49b92011-07-21 03:24:11 +00002320 if (features & NETIF_F_HW_VLAN_RX) {
2321 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00002322 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002323 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002324 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002325 } else {
Jiri Pirko18c49b92011-07-21 03:24:11 +00002326 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00002327 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2329 }
2330}
2331
Jiri Pirko18c49b92011-07-21 03:24:11 +00002332static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2333{
2334 /*
2335 * Since there is no support for separate rx/tx vlan accel
2336 * enable/disable make sure tx flag is always in same state as rx.
2337 */
2338 if (features & NETIF_F_HW_VLAN_RX)
2339 features |= NETIF_F_HW_VLAN_TX;
2340 else
2341 features &= ~NETIF_F_HW_VLAN_TX;
2342
2343 return features;
2344}
2345
2346static int qlge_set_features(struct net_device *ndev, u32 features)
2347{
2348 u32 changed = ndev->features ^ features;
2349
2350 if (changed & NETIF_F_HW_VLAN_RX)
2351 qlge_vlan_mode(ndev, features);
2352
2353 return 0;
2354}
2355
2356static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2357{
2358 u32 enable_bit = MAC_ADDR_E;
2359
2360 if (ql_set_mac_addr_reg
2361 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2362 netif_err(qdev, ifup, qdev->ndev,
2363 "Failed to init vlan address.\n");
2364 }
2365}
2366
Ron Mercer01e6b952009-10-30 12:13:34 +00002367static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002368{
2369 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002370 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002371
Ron Mercercc288f52009-02-23 10:42:14 +00002372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373 if (status)
2374 return;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002375
2376 __qlge_vlan_rx_add_vid(qdev, vid);
2377 set_bit(vid, qdev->active_vlans);
2378
Ron Mercercc288f52009-02-23 10:42:14 +00002379 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002380}
2381
Jiri Pirko18c49b92011-07-21 03:24:11 +00002382static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002383{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384 u32 enable_bit = 0;
2385
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002386 if (ql_set_mac_addr_reg
2387 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002388 netif_err(qdev, ifup, qdev->ndev,
2389 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002390 }
Jiri Pirko18c49b92011-07-21 03:24:11 +00002391}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002392
Jiri Pirko18c49b92011-07-21 03:24:11 +00002393static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2394{
2395 struct ql_adapter *qdev = netdev_priv(ndev);
2396 int status;
2397
2398 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2399 if (status)
2400 return;
2401
2402 __qlge_vlan_rx_kill_vid(qdev, vid);
2403 clear_bit(vid, qdev->active_vlans);
2404
2405 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002406}
2407
Ron Mercerc1b60092010-10-27 04:58:12 +00002408static void qlge_restore_vlan(struct ql_adapter *qdev)
2409{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002410 int status;
2411 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002412
Jiri Pirko18c49b92011-07-21 03:24:11 +00002413 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2414 if (status)
2415 return;
2416
2417 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2418 __qlge_vlan_rx_add_vid(qdev, vid);
2419
2420 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002421}
2422
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002423/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2424static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2425{
2426 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002427 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002428 return IRQ_HANDLED;
2429}
2430
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002431/* This handles a fatal error, MPI activity, and the default
2432 * rx_ring in an MSI-X multiple vector environment.
2433 * In MSI/Legacy environment it also process the rest of
2434 * the rx_rings.
2435 */
2436static irqreturn_t qlge_isr(int irq, void *dev_id)
2437{
2438 struct rx_ring *rx_ring = dev_id;
2439 struct ql_adapter *qdev = rx_ring->qdev;
2440 struct intr_context *intr_context = &qdev->intr_context[0];
2441 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002442 int work_done = 0;
2443
Ron Mercerbb0d2152008-10-20 10:30:26 -07002444 spin_lock(&qdev->hw_lock);
2445 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002446 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2447 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002448 spin_unlock(&qdev->hw_lock);
2449 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002450 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002451 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002452
Ron Mercerbb0d2152008-10-20 10:30:26 -07002453 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002454
2455 /*
2456 * Check for fatal error.
2457 */
2458 if (var & STS_FE) {
2459 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002460 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002461 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002462 netdev_err(qdev->ndev, "Resetting chip. "
2463 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002464 return IRQ_HANDLED;
2465 }
2466
2467 /*
2468 * Check MPI processor activity.
2469 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002470 if ((var & STS_PI) &&
2471 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002472 /*
2473 * We've got an async event or mailbox completion.
2474 * Handle it and clear the source of the interrupt.
2475 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002476 netif_err(qdev, intr, qdev->ndev,
2477 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002478 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002479 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2480 queue_delayed_work_on(smp_processor_id(),
2481 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002482 work_done++;
2483 }
2484
2485 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002486 * Get the bit-mask that shows the active queues for this
2487 * pass. Compare it to the queues that this irq services
2488 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002489 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002490 var = ql_read32(qdev, ISR1);
2491 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002492 netif_info(qdev, intr, qdev->ndev,
2493 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002494 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002495 napi_schedule(&rx_ring->napi);
2496 work_done++;
2497 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002498 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002499 return work_done ? IRQ_HANDLED : IRQ_NONE;
2500}
2501
2502static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2503{
2504
2505 if (skb_is_gso(skb)) {
2506 int err;
2507 if (skb_header_cloned(skb)) {
2508 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2509 if (err)
2510 return err;
2511 }
2512
2513 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2515 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2516 mac_iocb_ptr->total_hdrs_len =
2517 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2518 mac_iocb_ptr->net_trans_offset =
2519 cpu_to_le16(skb_network_offset(skb) |
2520 skb_transport_offset(skb)
2521 << OB_MAC_TRANSPORT_HDR_SHIFT);
2522 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2523 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2524 if (likely(skb->protocol == htons(ETH_P_IP))) {
2525 struct iphdr *iph = ip_hdr(skb);
2526 iph->check = 0;
2527 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2528 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2529 iph->daddr, 0,
2530 IPPROTO_TCP,
2531 0);
2532 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2533 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2534 tcp_hdr(skb)->check =
2535 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2536 &ipv6_hdr(skb)->daddr,
2537 0, IPPROTO_TCP, 0);
2538 }
2539 return 1;
2540 }
2541 return 0;
2542}
2543
2544static void ql_hw_csum_setup(struct sk_buff *skb,
2545 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2546{
2547 int len;
2548 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002549 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002550 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2551 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2552 mac_iocb_ptr->net_trans_offset =
2553 cpu_to_le16(skb_network_offset(skb) |
2554 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2555
2556 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2557 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2558 if (likely(iph->protocol == IPPROTO_TCP)) {
2559 check = &(tcp_hdr(skb)->check);
2560 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2561 mac_iocb_ptr->total_hdrs_len =
2562 cpu_to_le16(skb_transport_offset(skb) +
2563 (tcp_hdr(skb)->doff << 2));
2564 } else {
2565 check = &(udp_hdr(skb)->check);
2566 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2567 mac_iocb_ptr->total_hdrs_len =
2568 cpu_to_le16(skb_transport_offset(skb) +
2569 sizeof(struct udphdr));
2570 }
2571 *check = ~csum_tcpudp_magic(iph->saddr,
2572 iph->daddr, len, iph->protocol, 0);
2573}
2574
Stephen Hemminger613573252009-08-31 19:50:58 +00002575static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002576{
2577 struct tx_ring_desc *tx_ring_desc;
2578 struct ob_mac_iocb_req *mac_iocb_ptr;
2579 struct ql_adapter *qdev = netdev_priv(ndev);
2580 int tso;
2581 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002582 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002583
2584 tx_ring = &qdev->tx_ring[tx_ring_idx];
2585
Ron Mercer74c50b42009-03-09 10:59:27 +00002586 if (skb_padto(skb, ETH_ZLEN))
2587 return NETDEV_TX_OK;
2588
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002589 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002590 netif_info(qdev, tx_queued, qdev->ndev,
2591 "%s: shutting down tx queue %d du to lack of resources.\n",
2592 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002593 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002594 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002595 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002596 return NETDEV_TX_BUSY;
2597 }
2598 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2599 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002600 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002601
2602 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2603 mac_iocb_ptr->tid = tx_ring_desc->index;
2604 /* We use the upper 32-bits to store the tx queue for this IO.
2605 * When we get the completion we can use it to establish the context.
2606 */
2607 mac_iocb_ptr->txq_idx = tx_ring_idx;
2608 tx_ring_desc->skb = skb;
2609
2610 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2611
Jesse Grosseab6d182010-10-20 13:56:03 +00002612 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002613 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2614 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002615 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2616 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2617 }
2618 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2619 if (tso < 0) {
2620 dev_kfree_skb_any(skb);
2621 return NETDEV_TX_OK;
2622 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2623 ql_hw_csum_setup(skb,
2624 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2625 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002626 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2627 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002628 netif_err(qdev, tx_queued, qdev->ndev,
2629 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002630 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002631 return NETDEV_TX_BUSY;
2632 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002633 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2634 tx_ring->prod_idx++;
2635 if (tx_ring->prod_idx == tx_ring->wq_len)
2636 tx_ring->prod_idx = 0;
2637 wmb();
2638
2639 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002640 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2641 "tx queued, slot %d, len %d\n",
2642 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002643
2644 atomic_dec(&tx_ring->tx_count);
2645 return NETDEV_TX_OK;
2646}
2647
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002648
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002649static void ql_free_shadow_space(struct ql_adapter *qdev)
2650{
2651 if (qdev->rx_ring_shadow_reg_area) {
2652 pci_free_consistent(qdev->pdev,
2653 PAGE_SIZE,
2654 qdev->rx_ring_shadow_reg_area,
2655 qdev->rx_ring_shadow_reg_dma);
2656 qdev->rx_ring_shadow_reg_area = NULL;
2657 }
2658 if (qdev->tx_ring_shadow_reg_area) {
2659 pci_free_consistent(qdev->pdev,
2660 PAGE_SIZE,
2661 qdev->tx_ring_shadow_reg_area,
2662 qdev->tx_ring_shadow_reg_dma);
2663 qdev->tx_ring_shadow_reg_area = NULL;
2664 }
2665}
2666
2667static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2668{
2669 qdev->rx_ring_shadow_reg_area =
2670 pci_alloc_consistent(qdev->pdev,
2671 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2672 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002673 netif_err(qdev, ifup, qdev->ndev,
2674 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002675 return -ENOMEM;
2676 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002677 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002678 qdev->tx_ring_shadow_reg_area =
2679 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2680 &qdev->tx_ring_shadow_reg_dma);
2681 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002682 netif_err(qdev, ifup, qdev->ndev,
2683 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002684 goto err_wqp_sh_area;
2685 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002686 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002687 return 0;
2688
2689err_wqp_sh_area:
2690 pci_free_consistent(qdev->pdev,
2691 PAGE_SIZE,
2692 qdev->rx_ring_shadow_reg_area,
2693 qdev->rx_ring_shadow_reg_dma);
2694 return -ENOMEM;
2695}
2696
2697static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2698{
2699 struct tx_ring_desc *tx_ring_desc;
2700 int i;
2701 struct ob_mac_iocb_req *mac_iocb_ptr;
2702
2703 mac_iocb_ptr = tx_ring->wq_base;
2704 tx_ring_desc = tx_ring->q;
2705 for (i = 0; i < tx_ring->wq_len; i++) {
2706 tx_ring_desc->index = i;
2707 tx_ring_desc->skb = NULL;
2708 tx_ring_desc->queue_entry = mac_iocb_ptr;
2709 mac_iocb_ptr++;
2710 tx_ring_desc++;
2711 }
2712 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2713 atomic_set(&tx_ring->queue_stopped, 0);
2714}
2715
2716static void ql_free_tx_resources(struct ql_adapter *qdev,
2717 struct tx_ring *tx_ring)
2718{
2719 if (tx_ring->wq_base) {
2720 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2721 tx_ring->wq_base, tx_ring->wq_base_dma);
2722 tx_ring->wq_base = NULL;
2723 }
2724 kfree(tx_ring->q);
2725 tx_ring->q = NULL;
2726}
2727
2728static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2729 struct tx_ring *tx_ring)
2730{
2731 tx_ring->wq_base =
2732 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2733 &tx_ring->wq_base_dma);
2734
Joe Perches8e95a202009-12-03 07:58:21 +00002735 if ((tx_ring->wq_base == NULL) ||
2736 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002737 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002738 return -ENOMEM;
2739 }
2740 tx_ring->q =
2741 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2742 if (tx_ring->q == NULL)
2743 goto err;
2744
2745 return 0;
2746err:
2747 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2748 tx_ring->wq_base, tx_ring->wq_base_dma);
2749 return -ENOMEM;
2750}
2751
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002752static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002753{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002754 struct bq_desc *lbq_desc;
2755
Ron Mercer7c734352009-10-19 03:32:19 +00002756 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002757
Ron Mercer7c734352009-10-19 03:32:19 +00002758 curr_idx = rx_ring->lbq_curr_idx;
2759 clean_idx = rx_ring->lbq_clean_idx;
2760 while (curr_idx != clean_idx) {
2761 lbq_desc = &rx_ring->lbq[curr_idx];
2762
2763 if (lbq_desc->p.pg_chunk.last_flag) {
2764 pci_unmap_page(qdev->pdev,
2765 lbq_desc->p.pg_chunk.map,
2766 ql_lbq_block_size(qdev),
2767 PCI_DMA_FROMDEVICE);
2768 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002769 }
Ron Mercer7c734352009-10-19 03:32:19 +00002770
2771 put_page(lbq_desc->p.pg_chunk.page);
2772 lbq_desc->p.pg_chunk.page = NULL;
2773
2774 if (++curr_idx == rx_ring->lbq_len)
2775 curr_idx = 0;
2776
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002777 }
2778}
2779
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002780static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002781{
2782 int i;
2783 struct bq_desc *sbq_desc;
2784
2785 for (i = 0; i < rx_ring->sbq_len; i++) {
2786 sbq_desc = &rx_ring->sbq[i];
2787 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002788 netif_err(qdev, ifup, qdev->ndev,
2789 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002790 return;
2791 }
2792 if (sbq_desc->p.skb) {
2793 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002794 dma_unmap_addr(sbq_desc, mapaddr),
2795 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002796 PCI_DMA_FROMDEVICE);
2797 dev_kfree_skb(sbq_desc->p.skb);
2798 sbq_desc->p.skb = NULL;
2799 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002800 }
2801}
2802
Ron Mercer4545a3f2009-02-23 10:42:17 +00002803/* Free all large and small rx buffers associated
2804 * with the completion queues for this device.
2805 */
2806static void ql_free_rx_buffers(struct ql_adapter *qdev)
2807{
2808 int i;
2809 struct rx_ring *rx_ring;
2810
2811 for (i = 0; i < qdev->rx_ring_count; i++) {
2812 rx_ring = &qdev->rx_ring[i];
2813 if (rx_ring->lbq)
2814 ql_free_lbq_buffers(qdev, rx_ring);
2815 if (rx_ring->sbq)
2816 ql_free_sbq_buffers(qdev, rx_ring);
2817 }
2818}
2819
2820static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2821{
2822 struct rx_ring *rx_ring;
2823 int i;
2824
2825 for (i = 0; i < qdev->rx_ring_count; i++) {
2826 rx_ring = &qdev->rx_ring[i];
2827 if (rx_ring->type != TX_Q)
2828 ql_update_buffer_queues(qdev, rx_ring);
2829 }
2830}
2831
2832static void ql_init_lbq_ring(struct ql_adapter *qdev,
2833 struct rx_ring *rx_ring)
2834{
2835 int i;
2836 struct bq_desc *lbq_desc;
2837 __le64 *bq = rx_ring->lbq_base;
2838
2839 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2840 for (i = 0; i < rx_ring->lbq_len; i++) {
2841 lbq_desc = &rx_ring->lbq[i];
2842 memset(lbq_desc, 0, sizeof(*lbq_desc));
2843 lbq_desc->index = i;
2844 lbq_desc->addr = bq;
2845 bq++;
2846 }
2847}
2848
2849static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002850 struct rx_ring *rx_ring)
2851{
2852 int i;
2853 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002854 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002855
Ron Mercer4545a3f2009-02-23 10:42:17 +00002856 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002857 for (i = 0; i < rx_ring->sbq_len; i++) {
2858 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002859 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002860 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002861 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002862 bq++;
2863 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002864}
2865
2866static void ql_free_rx_resources(struct ql_adapter *qdev,
2867 struct rx_ring *rx_ring)
2868{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002869 /* Free the small buffer queue. */
2870 if (rx_ring->sbq_base) {
2871 pci_free_consistent(qdev->pdev,
2872 rx_ring->sbq_size,
2873 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2874 rx_ring->sbq_base = NULL;
2875 }
2876
2877 /* Free the small buffer queue control blocks. */
2878 kfree(rx_ring->sbq);
2879 rx_ring->sbq = NULL;
2880
2881 /* Free the large buffer queue. */
2882 if (rx_ring->lbq_base) {
2883 pci_free_consistent(qdev->pdev,
2884 rx_ring->lbq_size,
2885 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2886 rx_ring->lbq_base = NULL;
2887 }
2888
2889 /* Free the large buffer queue control blocks. */
2890 kfree(rx_ring->lbq);
2891 rx_ring->lbq = NULL;
2892
2893 /* Free the rx queue. */
2894 if (rx_ring->cq_base) {
2895 pci_free_consistent(qdev->pdev,
2896 rx_ring->cq_size,
2897 rx_ring->cq_base, rx_ring->cq_base_dma);
2898 rx_ring->cq_base = NULL;
2899 }
2900}
2901
2902/* Allocate queues and buffers for this completions queue based
2903 * on the values in the parameter structure. */
2904static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2905 struct rx_ring *rx_ring)
2906{
2907
2908 /*
2909 * Allocate the completion queue for this rx_ring.
2910 */
2911 rx_ring->cq_base =
2912 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2913 &rx_ring->cq_base_dma);
2914
2915 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002916 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002917 return -ENOMEM;
2918 }
2919
2920 if (rx_ring->sbq_len) {
2921 /*
2922 * Allocate small buffer queue.
2923 */
2924 rx_ring->sbq_base =
2925 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2926 &rx_ring->sbq_base_dma);
2927
2928 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002929 netif_err(qdev, ifup, qdev->ndev,
2930 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002931 goto err_mem;
2932 }
2933
2934 /*
2935 * Allocate small buffer queue control blocks.
2936 */
2937 rx_ring->sbq =
2938 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2939 GFP_KERNEL);
2940 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002941 netif_err(qdev, ifup, qdev->ndev,
2942 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002943 goto err_mem;
2944 }
2945
Ron Mercer4545a3f2009-02-23 10:42:17 +00002946 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002947 }
2948
2949 if (rx_ring->lbq_len) {
2950 /*
2951 * Allocate large buffer queue.
2952 */
2953 rx_ring->lbq_base =
2954 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2955 &rx_ring->lbq_base_dma);
2956
2957 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002958 netif_err(qdev, ifup, qdev->ndev,
2959 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002960 goto err_mem;
2961 }
2962 /*
2963 * Allocate large buffer queue control blocks.
2964 */
2965 rx_ring->lbq =
2966 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2967 GFP_KERNEL);
2968 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002969 netif_err(qdev, ifup, qdev->ndev,
2970 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002971 goto err_mem;
2972 }
2973
Ron Mercer4545a3f2009-02-23 10:42:17 +00002974 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002975 }
2976
2977 return 0;
2978
2979err_mem:
2980 ql_free_rx_resources(qdev, rx_ring);
2981 return -ENOMEM;
2982}
2983
2984static void ql_tx_ring_clean(struct ql_adapter *qdev)
2985{
2986 struct tx_ring *tx_ring;
2987 struct tx_ring_desc *tx_ring_desc;
2988 int i, j;
2989
2990 /*
2991 * Loop through all queues and free
2992 * any resources.
2993 */
2994 for (j = 0; j < qdev->tx_ring_count; j++) {
2995 tx_ring = &qdev->tx_ring[j];
2996 for (i = 0; i < tx_ring->wq_len; i++) {
2997 tx_ring_desc = &tx_ring->q[i];
2998 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002999 netif_err(qdev, ifdown, qdev->ndev,
3000 "Freeing lost SKB %p, from queue %d, index %d.\n",
3001 tx_ring_desc->skb, j,
3002 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003003 ql_unmap_send(qdev, tx_ring_desc,
3004 tx_ring_desc->map_cnt);
3005 dev_kfree_skb(tx_ring_desc->skb);
3006 tx_ring_desc->skb = NULL;
3007 }
3008 }
3009 }
3010}
3011
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003012static void ql_free_mem_resources(struct ql_adapter *qdev)
3013{
3014 int i;
3015
3016 for (i = 0; i < qdev->tx_ring_count; i++)
3017 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3018 for (i = 0; i < qdev->rx_ring_count; i++)
3019 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3020 ql_free_shadow_space(qdev);
3021}
3022
3023static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3024{
3025 int i;
3026
3027 /* Allocate space for our shadow registers and such. */
3028 if (ql_alloc_shadow_space(qdev))
3029 return -ENOMEM;
3030
3031 for (i = 0; i < qdev->rx_ring_count; i++) {
3032 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003033 netif_err(qdev, ifup, qdev->ndev,
3034 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003035 goto err_mem;
3036 }
3037 }
3038 /* Allocate tx queue resources */
3039 for (i = 0; i < qdev->tx_ring_count; i++) {
3040 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003041 netif_err(qdev, ifup, qdev->ndev,
3042 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003043 goto err_mem;
3044 }
3045 }
3046 return 0;
3047
3048err_mem:
3049 ql_free_mem_resources(qdev);
3050 return -ENOMEM;
3051}
3052
3053/* Set up the rx ring control block and pass it to the chip.
3054 * The control block is defined as
3055 * "Completion Queue Initialization Control Block", or cqicb.
3056 */
3057static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3058{
3059 struct cqicb *cqicb = &rx_ring->cqicb;
3060 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003061 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003062 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003063 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003064 void __iomem *doorbell_area =
3065 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3066 int err = 0;
3067 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003068 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003069 __le64 *base_indirect_ptr;
3070 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003071
3072 /* Set up the shadow registers for this ring. */
3073 rx_ring->prod_idx_sh_reg = shadow_reg;
3074 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003075 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076 shadow_reg += sizeof(u64);
3077 shadow_reg_dma += sizeof(u64);
3078 rx_ring->lbq_base_indirect = shadow_reg;
3079 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003080 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3081 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003082 rx_ring->sbq_base_indirect = shadow_reg;
3083 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3084
3085 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003086 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003087 rx_ring->cnsmr_idx = 0;
3088 rx_ring->curr_entry = rx_ring->cq_base;
3089
3090 /* PCI doorbell mem area + 0x04 for valid register */
3091 rx_ring->valid_db_reg = doorbell_area + 0x04;
3092
3093 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003094 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003095
3096 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003097 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003098
3099 memset((void *)cqicb, 0, sizeof(struct cqicb));
3100 cqicb->msix_vect = rx_ring->irq;
3101
Ron Mercer459caf52009-01-04 17:08:11 -08003102 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3103 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003104
Ron Mercer97345522009-01-09 11:31:50 +00003105 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003106
Ron Mercer97345522009-01-09 11:31:50 +00003107 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003108
3109 /*
3110 * Set up the control block load flags.
3111 */
3112 cqicb->flags = FLAGS_LC | /* Load queue base address */
3113 FLAGS_LV | /* Load MSI-X vector */
3114 FLAGS_LI; /* Load irq delay values */
3115 if (rx_ring->lbq_len) {
3116 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003117 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003118 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003119 page_entries = 0;
3120 do {
3121 *base_indirect_ptr = cpu_to_le64(tmp);
3122 tmp += DB_PAGE_SIZE;
3123 base_indirect_ptr++;
3124 page_entries++;
3125 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003126 cqicb->lbq_addr =
3127 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003128 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3129 (u16) rx_ring->lbq_buf_size;
3130 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3131 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3132 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003133 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003134 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003135 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003136 rx_ring->lbq_clean_idx = 0;
3137 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003138 }
3139 if (rx_ring->sbq_len) {
3140 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003141 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003142 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003143 page_entries = 0;
3144 do {
3145 *base_indirect_ptr = cpu_to_le64(tmp);
3146 tmp += DB_PAGE_SIZE;
3147 base_indirect_ptr++;
3148 page_entries++;
3149 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003150 cqicb->sbq_addr =
3151 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003152 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003153 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003154 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3155 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003156 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003157 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003158 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003159 rx_ring->sbq_clean_idx = 0;
3160 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003161 }
3162 switch (rx_ring->type) {
3163 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003164 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3165 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3166 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003167 case RX_Q:
3168 /* Inbound completion handling rx_rings run in
3169 * separate NAPI contexts.
3170 */
3171 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3172 64);
3173 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3174 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3175 break;
3176 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003177 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3178 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003179 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003180 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3181 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003182 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3183 CFG_LCQ, rx_ring->cq_id);
3184 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003185 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003186 return err;
3187 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188 return err;
3189}
3190
3191static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3192{
3193 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3194 void __iomem *doorbell_area =
3195 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3196 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3197 (tx_ring->wq_id * sizeof(u64));
3198 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3199 (tx_ring->wq_id * sizeof(u64));
3200 int err = 0;
3201
3202 /*
3203 * Assign doorbell registers for this tx_ring.
3204 */
3205 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003206 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003207 tx_ring->prod_idx = 0;
3208 /* TX PCI doorbell mem area + 0x04 */
3209 tx_ring->valid_db_reg = doorbell_area + 0x04;
3210
3211 /*
3212 * Assign shadow registers for this tx_ring.
3213 */
3214 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3215 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3216
3217 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3218 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3219 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3220 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3221 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003222 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003223
Ron Mercer97345522009-01-09 11:31:50 +00003224 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003225
3226 ql_init_tx_ring(qdev, tx_ring);
3227
Ron Mercere3324712009-07-02 06:06:13 +00003228 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003229 (u16) tx_ring->wq_id);
3230 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003231 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003232 return err;
3233 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003234 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3235 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003236 return err;
3237}
3238
3239static void ql_disable_msix(struct ql_adapter *qdev)
3240{
3241 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3242 pci_disable_msix(qdev->pdev);
3243 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3244 kfree(qdev->msi_x_entry);
3245 qdev->msi_x_entry = NULL;
3246 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3247 pci_disable_msi(qdev->pdev);
3248 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3249 }
3250}
3251
Ron Mercera4ab6132009-08-27 11:02:10 +00003252/* We start by trying to get the number of vectors
3253 * stored in qdev->intr_count. If we don't get that
3254 * many then we reduce the count and try again.
3255 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003256static void ql_enable_msix(struct ql_adapter *qdev)
3257{
Ron Mercera4ab6132009-08-27 11:02:10 +00003258 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003259
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003260 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003261 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003262 /* Try to alloc space for the msix struct,
3263 * if it fails then go to MSI/legacy.
3264 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003265 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003266 sizeof(struct msix_entry),
3267 GFP_KERNEL);
3268 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003269 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003270 goto msi;
3271 }
3272
Ron Mercera4ab6132009-08-27 11:02:10 +00003273 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003274 qdev->msi_x_entry[i].entry = i;
3275
Ron Mercera4ab6132009-08-27 11:02:10 +00003276 /* Loop to get our vectors. We start with
3277 * what we want and settle for what we get.
3278 */
3279 do {
3280 err = pci_enable_msix(qdev->pdev,
3281 qdev->msi_x_entry, qdev->intr_count);
3282 if (err > 0)
3283 qdev->intr_count = err;
3284 } while (err > 0);
3285
3286 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003287 kfree(qdev->msi_x_entry);
3288 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003289 netif_warn(qdev, ifup, qdev->ndev,
3290 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003291 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003292 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003293 } else if (err == 0) {
3294 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003295 netif_info(qdev, ifup, qdev->ndev,
3296 "MSI-X Enabled, got %d vectors.\n",
3297 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003298 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003299 }
3300 }
3301msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003302 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003303 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003304 if (!pci_enable_msi(qdev->pdev)) {
3305 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003306 netif_info(qdev, ifup, qdev->ndev,
3307 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003308 return;
3309 }
3310 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003311 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003312 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3313 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003314}
3315
Ron Mercer39aa8162009-08-27 11:02:11 +00003316/* Each vector services 1 RSS ring and and 1 or more
3317 * TX completion rings. This function loops through
3318 * the TX completion rings and assigns the vector that
3319 * will service it. An example would be if there are
3320 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3321 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003322 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003323 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3324 */
3325static void ql_set_tx_vect(struct ql_adapter *qdev)
3326{
3327 int i, j, vect;
3328 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3329
3330 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3331 /* Assign irq vectors to TX rx_rings.*/
3332 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3333 i < qdev->rx_ring_count; i++) {
3334 if (j == tx_rings_per_vector) {
3335 vect++;
3336 j = 0;
3337 }
3338 qdev->rx_ring[i].irq = vect;
3339 j++;
3340 }
3341 } else {
3342 /* For single vector all rings have an irq
3343 * of zero.
3344 */
3345 for (i = 0; i < qdev->rx_ring_count; i++)
3346 qdev->rx_ring[i].irq = 0;
3347 }
3348}
3349
3350/* Set the interrupt mask for this vector. Each vector
3351 * will service 1 RSS ring and 1 or more TX completion
3352 * rings. This function sets up a bit mask per vector
3353 * that indicates which rings it services.
3354 */
3355static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3356{
3357 int j, vect = ctx->intr;
3358 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3359
3360 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3361 /* Add the RSS ring serviced by this vector
3362 * to the mask.
3363 */
3364 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3365 /* Add the TX ring(s) serviced by this vector
3366 * to the mask. */
3367 for (j = 0; j < tx_rings_per_vector; j++) {
3368 ctx->irq_mask |=
3369 (1 << qdev->rx_ring[qdev->rss_ring_count +
3370 (vect * tx_rings_per_vector) + j].cq_id);
3371 }
3372 } else {
3373 /* For single vector we just shift each queue's
3374 * ID into the mask.
3375 */
3376 for (j = 0; j < qdev->rx_ring_count; j++)
3377 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3378 }
3379}
3380
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003381/*
3382 * Here we build the intr_context structures based on
3383 * our rx_ring count and intr vector count.
3384 * The intr_context structure is used to hook each vector
3385 * to possibly different handlers.
3386 */
3387static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3388{
3389 int i = 0;
3390 struct intr_context *intr_context = &qdev->intr_context[0];
3391
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003392 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3393 /* Each rx_ring has it's
3394 * own intr_context since we have separate
3395 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003396 */
3397 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3398 qdev->rx_ring[i].irq = i;
3399 intr_context->intr = i;
3400 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003401 /* Set up this vector's bit-mask that indicates
3402 * which queues it services.
3403 */
3404 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003405 /*
3406 * We set up each vectors enable/disable/read bits so
3407 * there's no bit/mask calculations in the critical path.
3408 */
3409 intr_context->intr_en_mask =
3410 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3412 | i;
3413 intr_context->intr_dis_mask =
3414 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3415 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3416 INTR_EN_IHD | i;
3417 intr_context->intr_read_mask =
3418 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3419 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3420 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003421 if (i == 0) {
3422 /* The first vector/queue handles
3423 * broadcast/multicast, fatal errors,
3424 * and firmware events. This in addition
3425 * to normal inbound NAPI processing.
3426 */
3427 intr_context->handler = qlge_isr;
3428 sprintf(intr_context->name, "%s-rx-%d",
3429 qdev->ndev->name, i);
3430 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003431 /*
3432 * Inbound queues handle unicast frames only.
3433 */
3434 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003435 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003436 qdev->ndev->name, i);
3437 }
3438 }
3439 } else {
3440 /*
3441 * All rx_rings use the same intr_context since
3442 * there is only one vector.
3443 */
3444 intr_context->intr = 0;
3445 intr_context->qdev = qdev;
3446 /*
3447 * We set up each vectors enable/disable/read bits so
3448 * there's no bit/mask calculations in the critical path.
3449 */
3450 intr_context->intr_en_mask =
3451 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3452 intr_context->intr_dis_mask =
3453 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3454 INTR_EN_TYPE_DISABLE;
3455 intr_context->intr_read_mask =
3456 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3457 /*
3458 * Single interrupt means one handler for all rings.
3459 */
3460 intr_context->handler = qlge_isr;
3461 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003462 /* Set up this vector's bit-mask that indicates
3463 * which queues it services. In this case there is
3464 * a single vector so it will service all RSS and
3465 * TX completion rings.
3466 */
3467 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003468 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003469 /* Tell the TX completion rings which MSIx vector
3470 * they will be using.
3471 */
3472 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003473}
3474
3475static void ql_free_irq(struct ql_adapter *qdev)
3476{
3477 int i;
3478 struct intr_context *intr_context = &qdev->intr_context[0];
3479
3480 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3481 if (intr_context->hooked) {
3482 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3483 free_irq(qdev->msi_x_entry[i].vector,
3484 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003485 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3486 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003487 } else {
3488 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003489 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3490 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003491 }
3492 }
3493 }
3494 ql_disable_msix(qdev);
3495}
3496
3497static int ql_request_irq(struct ql_adapter *qdev)
3498{
3499 int i;
3500 int status = 0;
3501 struct pci_dev *pdev = qdev->pdev;
3502 struct intr_context *intr_context = &qdev->intr_context[0];
3503
3504 ql_resolve_queues_to_irqs(qdev);
3505
3506 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3507 atomic_set(&intr_context->irq_cnt, 0);
3508 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3509 status = request_irq(qdev->msi_x_entry[i].vector,
3510 intr_context->handler,
3511 0,
3512 intr_context->name,
3513 &qdev->rx_ring[i]);
3514 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003515 netif_err(qdev, ifup, qdev->ndev,
3516 "Failed request for MSIX interrupt %d.\n",
3517 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003518 goto err_irq;
3519 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003520 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3521 "Hooked intr %d, queue type %s, with name %s.\n",
3522 i,
3523 qdev->rx_ring[i].type == DEFAULT_Q ?
3524 "DEFAULT_Q" :
3525 qdev->rx_ring[i].type == TX_Q ?
3526 "TX_Q" :
3527 qdev->rx_ring[i].type == RX_Q ?
3528 "RX_Q" : "",
3529 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003530 }
3531 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003532 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3533 "trying msi or legacy interrupts.\n");
3534 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3535 "%s: irq = %d.\n", __func__, pdev->irq);
3536 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3537 "%s: context->name = %s.\n", __func__,
3538 intr_context->name);
3539 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3540 "%s: dev_id = 0x%p.\n", __func__,
3541 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003542 status =
3543 request_irq(pdev->irq, qlge_isr,
3544 test_bit(QL_MSI_ENABLED,
3545 &qdev->
3546 flags) ? 0 : IRQF_SHARED,
3547 intr_context->name, &qdev->rx_ring[0]);
3548 if (status)
3549 goto err_irq;
3550
Joe Perchesae9540f72010-02-09 11:49:52 +00003551 netif_err(qdev, ifup, qdev->ndev,
3552 "Hooked intr %d, queue type %s, with name %s.\n",
3553 i,
3554 qdev->rx_ring[0].type == DEFAULT_Q ?
3555 "DEFAULT_Q" :
3556 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3557 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3558 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559 }
3560 intr_context->hooked = 1;
3561 }
3562 return status;
3563err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003564 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565 ql_free_irq(qdev);
3566 return status;
3567}
3568
3569static int ql_start_rss(struct ql_adapter *qdev)
3570{
Joe Perches215faf92010-12-21 02:16:10 -08003571 static const u8 init_hash_seed[] = {
3572 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3573 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3574 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3575 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3576 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3577 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003578 struct ricb *ricb = &qdev->ricb;
3579 int status = 0;
3580 int i;
3581 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3582
Ron Mercere3324712009-07-02 06:06:13 +00003583 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003584
Ron Mercerb2014ff2009-08-27 11:02:09 +00003585 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003587 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3588 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003589
3590 /*
3591 * Fill out the Indirection Table.
3592 */
Ron Mercer541ae282009-10-08 09:54:37 +00003593 for (i = 0; i < 1024; i++)
3594 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003595
Ron Mercer541ae282009-10-08 09:54:37 +00003596 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3597 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003598
Joe Perchesae9540f72010-02-09 11:49:52 +00003599 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003600
Ron Mercere3324712009-07-02 06:06:13 +00003601 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003602 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003603 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003604 return status;
3605 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003606 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3607 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003608 return status;
3609}
3610
Ron Mercera5f59dc2009-07-02 06:06:07 +00003611static int ql_clear_routing_entries(struct ql_adapter *qdev)
3612{
3613 int i, status = 0;
3614
3615 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3616 if (status)
3617 return status;
3618 /* Clear all the entries in the routing table. */
3619 for (i = 0; i < 16; i++) {
3620 status = ql_set_routing_reg(qdev, i, 0, 0);
3621 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003622 netif_err(qdev, ifup, qdev->ndev,
3623 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003624 break;
3625 }
3626 }
3627 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3628 return status;
3629}
3630
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003631/* Initialize the frame-to-queue routing. */
3632static int ql_route_initialize(struct ql_adapter *qdev)
3633{
3634 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003635
3636 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003637 status = ql_clear_routing_entries(qdev);
3638 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003639 return status;
3640
3641 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3642 if (status)
3643 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003644
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003645 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3646 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003647 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003648 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003649 "Failed to init routing register "
3650 "for IP CSUM error packets.\n");
3651 goto exit;
3652 }
3653 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3654 RT_IDX_TU_CSUM_ERR, 1);
3655 if (status) {
3656 netif_err(qdev, ifup, qdev->ndev,
3657 "Failed to init routing register "
3658 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003659 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003660 }
3661 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3662 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003663 netif_err(qdev, ifup, qdev->ndev,
3664 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003665 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003666 }
3667 /* If we have more than one inbound queue, then turn on RSS in the
3668 * routing block.
3669 */
3670 if (qdev->rss_ring_count > 1) {
3671 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3672 RT_IDX_RSS_MATCH, 1);
3673 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003674 netif_err(qdev, ifup, qdev->ndev,
3675 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003676 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003677 }
3678 }
3679
3680 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3681 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003682 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003683 netif_err(qdev, ifup, qdev->ndev,
3684 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003685exit:
3686 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003687 return status;
3688}
3689
Ron Mercer2ee1e272009-03-03 12:10:33 +00003690int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003691{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003692 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003693
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003694 /* If check if the link is up and use to
3695 * determine if we are setting or clearing
3696 * the MAC address in the CAM.
3697 */
3698 set = ql_read32(qdev, STS);
3699 set &= qdev->port_link_up;
3700 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003701 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003702 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003703 return status;
3704 }
3705
3706 status = ql_route_initialize(qdev);
3707 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003708 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003709
3710 return status;
3711}
3712
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003713static int ql_adapter_initialize(struct ql_adapter *qdev)
3714{
3715 u32 value, mask;
3716 int i;
3717 int status = 0;
3718
3719 /*
3720 * Set up the System register to halt on errors.
3721 */
3722 value = SYS_EFE | SYS_FAE;
3723 mask = value << 16;
3724 ql_write32(qdev, SYS, mask | value);
3725
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003726 /* Set the default queue, and VLAN behavior. */
3727 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3728 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003729 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3730
3731 /* Set the MPI interrupt to enabled. */
3732 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3733
3734 /* Enable the function, set pagesize, enable error checking. */
3735 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003736 FSC_EC | FSC_VM_PAGE_4K;
3737 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003738
3739 /* Set/clear header splitting. */
3740 mask = FSC_VM_PAGESIZE_MASK |
3741 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3742 ql_write32(qdev, FSC, mask | value);
3743
Ron Mercer572c5262010-01-02 10:37:42 +00003744 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003745
Ron Mercera3b71932009-10-08 09:54:38 +00003746 /* Set RX packet routing to use port/pci function on which the
3747 * packet arrived on in addition to usual frame routing.
3748 * This is helpful on bonding where both interfaces can have
3749 * the same MAC address.
3750 */
3751 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003752 /* Reroute all packets to our Interface.
3753 * They may have been routed to MPI firmware
3754 * due to WOL.
3755 */
3756 value = ql_read32(qdev, MGMT_RCV_CFG);
3757 value &= ~MGMT_RCV_CFG_RM;
3758 mask = 0xffff0000;
3759
3760 /* Sticky reg needs clearing due to WOL. */
3761 ql_write32(qdev, MGMT_RCV_CFG, mask);
3762 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3763
3764 /* Default WOL is enable on Mezz cards */
3765 if (qdev->pdev->subsystem_device == 0x0068 ||
3766 qdev->pdev->subsystem_device == 0x0180)
3767 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003768
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003769 /* Start up the rx queues. */
3770 for (i = 0; i < qdev->rx_ring_count; i++) {
3771 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3772 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003775 return status;
3776 }
3777 }
3778
3779 /* If there is more than one inbound completion queue
3780 * then download a RICB to configure RSS.
3781 */
3782 if (qdev->rss_ring_count > 1) {
3783 status = ql_start_rss(qdev);
3784 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003785 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003786 return status;
3787 }
3788 }
3789
3790 /* Start up the tx queues. */
3791 for (i = 0; i < qdev->tx_ring_count; i++) {
3792 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3793 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003794 netif_err(qdev, ifup, qdev->ndev,
3795 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003796 return status;
3797 }
3798 }
3799
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003800 /* Initialize the port and set the max framesize. */
3801 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003802 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003803 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003804
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003805 /* Set up the MAC address and frame routing filter. */
3806 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003807 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003808 netif_err(qdev, ifup, qdev->ndev,
3809 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003810 return status;
3811 }
3812
3813 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003814 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003815 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3816 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003817 napi_enable(&qdev->rx_ring[i].napi);
3818 }
3819
3820 return status;
3821}
3822
3823/* Issue soft reset to chip. */
3824static int ql_adapter_reset(struct ql_adapter *qdev)
3825{
3826 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003827 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003828 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003829
Ron Mercera5f59dc2009-07-02 06:06:07 +00003830 /* Clear all the entries in the routing table. */
3831 status = ql_clear_routing_entries(qdev);
3832 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003833 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003834 return status;
3835 }
3836
3837 end_jiffies = jiffies +
3838 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003839
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003840 /* Check if bit is set then skip the mailbox command and
3841 * clear the bit, else we are in normal reset process.
3842 */
3843 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3844 /* Stop management traffic. */
3845 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003846
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003847 /* Wait for the NIC and MGMNT FIFOs to empty. */
3848 ql_wait_fifo_empty(qdev);
3849 } else
3850 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003851
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003852 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003853
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003854 do {
3855 value = ql_read32(qdev, RST_FO);
3856 if ((value & RST_FO_FR) == 0)
3857 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003858 cpu_relax();
3859 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003860
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003861 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003862 netif_err(qdev, ifdown, qdev->ndev,
3863 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003864 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003865 }
3866
Ron Mercer84087f42009-10-08 09:54:41 +00003867 /* Resume management traffic. */
3868 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003869 return status;
3870}
3871
3872static void ql_display_dev_info(struct net_device *ndev)
3873{
Joe Perchesb16fed02010-11-15 11:12:28 +00003874 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003875
Joe Perchesae9540f72010-02-09 11:49:52 +00003876 netif_info(qdev, probe, qdev->ndev,
3877 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3878 "XG Roll = %d, XG Rev = %d.\n",
3879 qdev->func,
3880 qdev->port,
3881 qdev->chip_rev_id & 0x0000000f,
3882 qdev->chip_rev_id >> 4 & 0x0000000f,
3883 qdev->chip_rev_id >> 8 & 0x0000000f,
3884 qdev->chip_rev_id >> 12 & 0x0000000f);
3885 netif_info(qdev, probe, qdev->ndev,
3886 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003887}
3888
stephen hemmingerac409212010-10-21 07:50:54 +00003889static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003890{
3891 int status = 0;
3892 u32 wol = MB_WOL_DISABLE;
3893
3894 /* The CAM is still intact after a reset, but if we
3895 * are doing WOL, then we may need to program the
3896 * routing regs. We would also need to issue the mailbox
3897 * commands to instruct the MPI what to do per the ethtool
3898 * settings.
3899 */
3900
3901 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3902 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003903 netif_err(qdev, ifdown, qdev->ndev,
3904 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3905 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003906 return -EINVAL;
3907 }
3908
3909 if (qdev->wol & WAKE_MAGIC) {
3910 status = ql_mb_wol_set_magic(qdev, 1);
3911 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003912 netif_err(qdev, ifdown, qdev->ndev,
3913 "Failed to set magic packet on %s.\n",
3914 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003915 return status;
3916 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003917 netif_info(qdev, drv, qdev->ndev,
3918 "Enabled magic packet successfully on %s.\n",
3919 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003920
3921 wol |= MB_WOL_MAGIC_PKT;
3922 }
3923
3924 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003925 wol |= MB_WOL_MODE_ON;
3926 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003927 netif_err(qdev, drv, qdev->ndev,
3928 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003929 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003930 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003931 }
3932
3933 return status;
3934}
3935
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003936static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003937{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003938
Ron Mercer6497b602009-02-12 16:37:13 -08003939 /* Don't kill the reset worker thread if we
3940 * are in the process of recovery.
3941 */
3942 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3943 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003944 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3945 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003946 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003947 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003948 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003949}
3950
3951static int ql_adapter_down(struct ql_adapter *qdev)
3952{
3953 int i, status = 0;
3954
3955 ql_link_off(qdev);
3956
3957 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003958
Ron Mercer39aa8162009-08-27 11:02:11 +00003959 for (i = 0; i < qdev->rss_ring_count; i++)
3960 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003961
3962 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3963
3964 ql_disable_interrupts(qdev);
3965
3966 ql_tx_ring_clean(qdev);
3967
Ron Mercer6b318cb2009-03-09 10:59:26 +00003968 /* Call netif_napi_del() from common point.
3969 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003970 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003971 netif_napi_del(&qdev->rx_ring[i].napi);
3972
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003973 status = ql_adapter_reset(qdev);
3974 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003975 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3976 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003977 ql_free_rx_buffers(qdev);
3978
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003979 return status;
3980}
3981
3982static int ql_adapter_up(struct ql_adapter *qdev)
3983{
3984 int err = 0;
3985
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003986 err = ql_adapter_initialize(qdev);
3987 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003988 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003989 goto err_init;
3990 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003991 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003992 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003993 /* If the port is initialized and the
3994 * link is up the turn on the carrier.
3995 */
3996 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3997 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003998 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003999 /* Restore rx mode. */
4000 clear_bit(QL_ALLMULTI, &qdev->flags);
4001 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4002 qlge_set_multicast_list(qdev->ndev);
4003
Ron Mercerc1b60092010-10-27 04:58:12 +00004004 /* Restore vlan setting. */
4005 qlge_restore_vlan(qdev);
4006
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004007 ql_enable_interrupts(qdev);
4008 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004009 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004010
4011 return 0;
4012err_init:
4013 ql_adapter_reset(qdev);
4014 return err;
4015}
4016
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004017static void ql_release_adapter_resources(struct ql_adapter *qdev)
4018{
4019 ql_free_mem_resources(qdev);
4020 ql_free_irq(qdev);
4021}
4022
4023static int ql_get_adapter_resources(struct ql_adapter *qdev)
4024{
4025 int status = 0;
4026
4027 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004028 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004029 return -ENOMEM;
4030 }
4031 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004032 return status;
4033}
4034
4035static int qlge_close(struct net_device *ndev)
4036{
4037 struct ql_adapter *qdev = netdev_priv(ndev);
4038
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004039 /* If we hit pci_channel_io_perm_failure
4040 * failure condition, then we already
4041 * brought the adapter down.
4042 */
4043 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004044 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004045 clear_bit(QL_EEH_FATAL, &qdev->flags);
4046 return 0;
4047 }
4048
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004049 /*
4050 * Wait for device to recover from a reset.
4051 * (Rarely happens, but possible.)
4052 */
4053 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4054 msleep(1);
4055 ql_adapter_down(qdev);
4056 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004057 return 0;
4058}
4059
4060static int ql_configure_rings(struct ql_adapter *qdev)
4061{
4062 int i;
4063 struct rx_ring *rx_ring;
4064 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004065 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004066 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4067 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4068
4069 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004070
Ron Mercera4ab6132009-08-27 11:02:10 +00004071 /* In a perfect world we have one RSS ring for each CPU
4072 * and each has it's own vector. To do that we ask for
4073 * cpu_cnt vectors. ql_enable_msix() will adjust the
4074 * vector count to what we actually get. We then
4075 * allocate an RSS ring for each.
4076 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004077 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004078 qdev->intr_count = cpu_cnt;
4079 ql_enable_msix(qdev);
4080 /* Adjust the RSS ring count to the actual vector count. */
4081 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004082 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004083 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004084
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004085 for (i = 0; i < qdev->tx_ring_count; i++) {
4086 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004087 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004088 tx_ring->qdev = qdev;
4089 tx_ring->wq_id = i;
4090 tx_ring->wq_len = qdev->tx_ring_size;
4091 tx_ring->wq_size =
4092 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4093
4094 /*
4095 * The completion queue ID for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004096 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004097 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004098 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004099 }
4100
4101 for (i = 0; i < qdev->rx_ring_count; i++) {
4102 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004103 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004104 rx_ring->qdev = qdev;
4105 rx_ring->cq_id = i;
4106 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004107 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004108 /*
4109 * Inbound (RSS) queues.
4110 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004111 rx_ring->cq_len = qdev->rx_ring_size;
4112 rx_ring->cq_size =
4113 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4114 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4115 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004116 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004117 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004118 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4119 "lbq_buf_size %d, order = %d\n",
4120 rx_ring->lbq_buf_size,
4121 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004122 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4123 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004124 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004125 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004126 rx_ring->type = RX_Q;
4127 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004128 /*
4129 * Outbound queue handles outbound completions only.
4130 */
4131 /* outbound cq is same size as tx_ring it services. */
4132 rx_ring->cq_len = qdev->tx_ring_size;
4133 rx_ring->cq_size =
4134 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4135 rx_ring->lbq_len = 0;
4136 rx_ring->lbq_size = 0;
4137 rx_ring->lbq_buf_size = 0;
4138 rx_ring->sbq_len = 0;
4139 rx_ring->sbq_size = 0;
4140 rx_ring->sbq_buf_size = 0;
4141 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004142 }
4143 }
4144 return 0;
4145}
4146
4147static int qlge_open(struct net_device *ndev)
4148{
4149 int err = 0;
4150 struct ql_adapter *qdev = netdev_priv(ndev);
4151
Ron Mercer74e12432009-11-11 12:54:04 +00004152 err = ql_adapter_reset(qdev);
4153 if (err)
4154 return err;
4155
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004156 err = ql_configure_rings(qdev);
4157 if (err)
4158 return err;
4159
4160 err = ql_get_adapter_resources(qdev);
4161 if (err)
4162 goto error_up;
4163
4164 err = ql_adapter_up(qdev);
4165 if (err)
4166 goto error_up;
4167
4168 return err;
4169
4170error_up:
4171 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004172 return err;
4173}
4174
Ron Mercer7c734352009-10-19 03:32:19 +00004175static int ql_change_rx_buffers(struct ql_adapter *qdev)
4176{
4177 struct rx_ring *rx_ring;
4178 int i, status;
4179 u32 lbq_buf_len;
4180
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004181 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004182 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4183 int i = 3;
4184 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004185 netif_err(qdev, ifup, qdev->ndev,
4186 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004187 ssleep(1);
4188 }
4189
4190 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004191 netif_err(qdev, ifup, qdev->ndev,
4192 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004193 return -ETIMEDOUT;
4194 }
4195 }
4196
4197 status = ql_adapter_down(qdev);
4198 if (status)
4199 goto error;
4200
4201 /* Get the new rx buffer size. */
4202 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4203 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4204 qdev->lbq_buf_order = get_order(lbq_buf_len);
4205
4206 for (i = 0; i < qdev->rss_ring_count; i++) {
4207 rx_ring = &qdev->rx_ring[i];
4208 /* Set the new size. */
4209 rx_ring->lbq_buf_size = lbq_buf_len;
4210 }
4211
4212 status = ql_adapter_up(qdev);
4213 if (status)
4214 goto error;
4215
4216 return status;
4217error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004218 netif_alert(qdev, ifup, qdev->ndev,
4219 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004220 set_bit(QL_ADAPTER_UP, &qdev->flags);
4221 dev_close(qdev->ndev);
4222 return status;
4223}
4224
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004225static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4226{
4227 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004228 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004229
4230 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004231 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004232 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004233 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004234 } else
4235 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004236
4237 queue_delayed_work(qdev->workqueue,
4238 &qdev->mpi_port_cfg_work, 3*HZ);
4239
Breno Leitao746079d2010-02-04 10:11:19 +00004240 ndev->mtu = new_mtu;
4241
Ron Mercer7c734352009-10-19 03:32:19 +00004242 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004243 return 0;
4244 }
4245
Ron Mercer7c734352009-10-19 03:32:19 +00004246 status = ql_change_rx_buffers(qdev);
4247 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004248 netif_err(qdev, ifup, qdev->ndev,
4249 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004250 }
4251
4252 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004253}
4254
4255static struct net_device_stats *qlge_get_stats(struct net_device
4256 *ndev)
4257{
Ron Mercer885ee392009-11-03 13:49:31 +00004258 struct ql_adapter *qdev = netdev_priv(ndev);
4259 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4260 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4261 unsigned long pkts, mcast, dropped, errors, bytes;
4262 int i;
4263
4264 /* Get RX stats. */
4265 pkts = mcast = dropped = errors = bytes = 0;
4266 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4267 pkts += rx_ring->rx_packets;
4268 bytes += rx_ring->rx_bytes;
4269 dropped += rx_ring->rx_dropped;
4270 errors += rx_ring->rx_errors;
4271 mcast += rx_ring->rx_multicast;
4272 }
4273 ndev->stats.rx_packets = pkts;
4274 ndev->stats.rx_bytes = bytes;
4275 ndev->stats.rx_dropped = dropped;
4276 ndev->stats.rx_errors = errors;
4277 ndev->stats.multicast = mcast;
4278
4279 /* Get TX stats. */
4280 pkts = errors = bytes = 0;
4281 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4282 pkts += tx_ring->tx_packets;
4283 bytes += tx_ring->tx_bytes;
4284 errors += tx_ring->tx_errors;
4285 }
4286 ndev->stats.tx_packets = pkts;
4287 ndev->stats.tx_bytes = bytes;
4288 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004289 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004290}
4291
stephen hemmingerac409212010-10-21 07:50:54 +00004292static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004293{
Joe Perchesb16fed02010-11-15 11:12:28 +00004294 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004295 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004296 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004297
Ron Mercercc288f52009-02-23 10:42:14 +00004298 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4299 if (status)
4300 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004301 /*
4302 * Set or clear promiscuous mode if a
4303 * transition is taking place.
4304 */
4305 if (ndev->flags & IFF_PROMISC) {
4306 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4307 if (ql_set_routing_reg
4308 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004309 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004310 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004311 } else {
4312 set_bit(QL_PROMISCUOUS, &qdev->flags);
4313 }
4314 }
4315 } else {
4316 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4317 if (ql_set_routing_reg
4318 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004319 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004320 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004321 } else {
4322 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4323 }
4324 }
4325 }
4326
4327 /*
4328 * Set or clear all multicast mode if a
4329 * transition is taking place.
4330 */
4331 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004332 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004333 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4334 if (ql_set_routing_reg
4335 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004336 netif_err(qdev, hw, qdev->ndev,
4337 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004338 } else {
4339 set_bit(QL_ALLMULTI, &qdev->flags);
4340 }
4341 }
4342 } else {
4343 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4344 if (ql_set_routing_reg
4345 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004346 netif_err(qdev, hw, qdev->ndev,
4347 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004348 } else {
4349 clear_bit(QL_ALLMULTI, &qdev->flags);
4350 }
4351 }
4352 }
4353
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004354 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004355 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4356 if (status)
4357 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004358 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004359 netdev_for_each_mc_addr(ha, ndev) {
4360 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004361 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004362 netif_err(qdev, hw, qdev->ndev,
4363 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004364 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004365 goto exit;
4366 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004367 i++;
4368 }
Ron Mercercc288f52009-02-23 10:42:14 +00004369 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004370 if (ql_set_routing_reg
4371 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004372 netif_err(qdev, hw, qdev->ndev,
4373 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004374 } else {
4375 set_bit(QL_ALLMULTI, &qdev->flags);
4376 }
4377 }
4378exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004379 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004380}
4381
4382static int qlge_set_mac_address(struct net_device *ndev, void *p)
4383{
Joe Perchesb16fed02010-11-15 11:12:28 +00004384 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004385 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004386 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004387
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004388 if (!is_valid_ether_addr(addr->sa_data))
4389 return -EADDRNOTAVAIL;
4390 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004391 /* Update local copy of current mac address. */
4392 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004393
Ron Mercercc288f52009-02-23 10:42:14 +00004394 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4395 if (status)
4396 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004397 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4398 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004399 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004400 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004401 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4402 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004403}
4404
4405static void qlge_tx_timeout(struct net_device *ndev)
4406{
Joe Perchesb16fed02010-11-15 11:12:28 +00004407 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004408 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004409}
4410
4411static void ql_asic_reset_work(struct work_struct *work)
4412{
4413 struct ql_adapter *qdev =
4414 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004415 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004416 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004417 status = ql_adapter_down(qdev);
4418 if (status)
4419 goto error;
4420
4421 status = ql_adapter_up(qdev);
4422 if (status)
4423 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004424
4425 /* Restore rx mode. */
4426 clear_bit(QL_ALLMULTI, &qdev->flags);
4427 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4428 qlge_set_multicast_list(qdev->ndev);
4429
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004430 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004431 return;
4432error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004433 netif_alert(qdev, ifup, qdev->ndev,
4434 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004435
Ron Mercerdb988122009-03-09 10:59:17 +00004436 set_bit(QL_ADAPTER_UP, &qdev->flags);
4437 dev_close(qdev->ndev);
4438 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004439}
4440
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004441static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004442 .get_flash = ql_get_8012_flash_params,
4443 .port_initialize = ql_8012_port_initialize,
4444};
4445
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004446static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004447 .get_flash = ql_get_8000_flash_params,
4448 .port_initialize = ql_8000_port_initialize,
4449};
4450
Ron Mercere4552f52009-06-09 05:39:32 +00004451/* Find the pcie function number for the other NIC
4452 * on this chip. Since both NIC functions share a
4453 * common firmware we have the lowest enabled function
4454 * do any common work. Examples would be resetting
4455 * after a fatal firmware error, or doing a firmware
4456 * coredump.
4457 */
4458static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004459{
Ron Mercere4552f52009-06-09 05:39:32 +00004460 int status = 0;
4461 u32 temp;
4462 u32 nic_func1, nic_func2;
4463
4464 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4465 &temp);
4466 if (status)
4467 return status;
4468
4469 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4470 MPI_TEST_NIC_FUNC_MASK);
4471 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4472 MPI_TEST_NIC_FUNC_MASK);
4473
4474 if (qdev->func == nic_func1)
4475 qdev->alt_func = nic_func2;
4476 else if (qdev->func == nic_func2)
4477 qdev->alt_func = nic_func1;
4478 else
4479 status = -EIO;
4480
4481 return status;
4482}
4483
4484static int ql_get_board_info(struct ql_adapter *qdev)
4485{
4486 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004487 qdev->func =
4488 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004489 if (qdev->func > 3)
4490 return -EIO;
4491
4492 status = ql_get_alt_pcie_func(qdev);
4493 if (status)
4494 return status;
4495
4496 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4497 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004498 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4499 qdev->port_link_up = STS_PL1;
4500 qdev->port_init = STS_PI1;
4501 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4502 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4503 } else {
4504 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4505 qdev->port_link_up = STS_PL0;
4506 qdev->port_init = STS_PI0;
4507 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4508 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4509 }
4510 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004511 qdev->device_id = qdev->pdev->device;
4512 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4513 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004514 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4515 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004516 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004517}
4518
4519static void ql_release_all(struct pci_dev *pdev)
4520{
4521 struct net_device *ndev = pci_get_drvdata(pdev);
4522 struct ql_adapter *qdev = netdev_priv(ndev);
4523
4524 if (qdev->workqueue) {
4525 destroy_workqueue(qdev->workqueue);
4526 qdev->workqueue = NULL;
4527 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004528
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004529 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004530 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004531 if (qdev->doorbell_area)
4532 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004533 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004534 pci_release_regions(pdev);
4535 pci_set_drvdata(pdev, NULL);
4536}
4537
4538static int __devinit ql_init_device(struct pci_dev *pdev,
4539 struct net_device *ndev, int cards_found)
4540{
4541 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004542 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004543
Ron Mercere3324712009-07-02 06:06:13 +00004544 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004545 err = pci_enable_device(pdev);
4546 if (err) {
4547 dev_err(&pdev->dev, "PCI device enable failed.\n");
4548 return err;
4549 }
4550
Ron Mercerebd6e772009-09-29 08:39:25 +00004551 qdev->ndev = ndev;
4552 qdev->pdev = pdev;
4553 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004554
Ron Mercerbc9167f2009-10-10 09:35:04 +00004555 /* Set PCIe read request size */
4556 err = pcie_set_readrq(pdev, 4096);
4557 if (err) {
4558 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004559 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004560 }
4561
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004562 err = pci_request_regions(pdev, DRV_NAME);
4563 if (err) {
4564 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004565 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004566 }
4567
4568 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004569 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004570 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004571 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004572 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004573 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004574 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004575 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004576 }
4577
4578 if (err) {
4579 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004580 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004581 }
4582
Ron Mercer73475332009-11-06 07:44:58 +00004583 /* Set PCIe reset type for EEH to fundamental. */
4584 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004585 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004586 qdev->reg_base =
4587 ioremap_nocache(pci_resource_start(pdev, 1),
4588 pci_resource_len(pdev, 1));
4589 if (!qdev->reg_base) {
4590 dev_err(&pdev->dev, "Register mapping failed.\n");
4591 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004592 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004593 }
4594
4595 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4596 qdev->doorbell_area =
4597 ioremap_nocache(pci_resource_start(pdev, 3),
4598 pci_resource_len(pdev, 3));
4599 if (!qdev->doorbell_area) {
4600 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4601 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004602 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004603 }
4604
Ron Mercere4552f52009-06-09 05:39:32 +00004605 err = ql_get_board_info(qdev);
4606 if (err) {
4607 dev_err(&pdev->dev, "Register access failed.\n");
4608 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004609 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004610 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004611 qdev->msg_enable = netif_msg_init(debug, default_msg);
4612 spin_lock_init(&qdev->hw_lock);
4613 spin_lock_init(&qdev->stats_lock);
4614
Ron Mercer8aae2602010-01-15 13:31:28 +00004615 if (qlge_mpi_coredump) {
4616 qdev->mpi_coredump =
4617 vmalloc(sizeof(struct ql_mpi_coredump));
4618 if (qdev->mpi_coredump == NULL) {
4619 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4620 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004621 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004622 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004623 if (qlge_force_coredump)
4624 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004625 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004626 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004627 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004628 if (err) {
4629 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004630 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004631 }
4632
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004633 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004634 /* Keep local copy of current mac address. */
4635 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004636
4637 /* Set up the default ring sizes. */
4638 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4639 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4640
4641 /* Set up the coalescing parameters. */
4642 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4643 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4644 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4645 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4646
4647 /*
4648 * Set up the operating parameters.
4649 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004650 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4651 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4652 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4653 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004654 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004655 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004656 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004657 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004658 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004659
4660 if (!cards_found) {
4661 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4662 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4663 DRV_NAME, DRV_VERSION);
4664 }
4665 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004666err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004667 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004668err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004669 pci_disable_device(pdev);
4670 return err;
4671}
4672
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004673static const struct net_device_ops qlge_netdev_ops = {
4674 .ndo_open = qlge_open,
4675 .ndo_stop = qlge_close,
4676 .ndo_start_xmit = qlge_send,
4677 .ndo_change_mtu = qlge_change_mtu,
4678 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004679 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004680 .ndo_set_mac_address = qlge_set_mac_address,
4681 .ndo_validate_addr = eth_validate_addr,
4682 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004683 .ndo_fix_features = qlge_fix_features,
4684 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004685 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4686 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004687};
4688
Ron Mercer15c052f2010-02-04 13:32:46 -08004689static void ql_timer(unsigned long data)
4690{
4691 struct ql_adapter *qdev = (struct ql_adapter *)data;
4692 u32 var = 0;
4693
4694 var = ql_read32(qdev, STS);
4695 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004696 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004697 return;
4698 }
4699
Breno Leitao72046d82010-07-01 03:00:17 +00004700 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004701}
4702
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004703static int __devinit qlge_probe(struct pci_dev *pdev,
4704 const struct pci_device_id *pci_entry)
4705{
4706 struct net_device *ndev = NULL;
4707 struct ql_adapter *qdev = NULL;
4708 static int cards_found = 0;
4709 int err = 0;
4710
Ron Mercer1e213302009-03-09 10:59:21 +00004711 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4712 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004713 if (!ndev)
4714 return -ENOMEM;
4715
4716 err = ql_init_device(pdev, ndev, cards_found);
4717 if (err < 0) {
4718 free_netdev(ndev);
4719 return err;
4720 }
4721
4722 qdev = netdev_priv(ndev);
4723 SET_NETDEV_DEV(ndev, &pdev->dev);
Michał Mirosław88230fd2011-04-18 13:31:21 +00004724 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4725 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4726 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4727 ndev->features = ndev->hw_features |
4728 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004729
4730 if (test_bit(QL_DMA64, &qdev->flags))
4731 ndev->features |= NETIF_F_HIGHDMA;
4732
4733 /*
4734 * Set up net_device structure.
4735 */
4736 ndev->tx_queue_len = qdev->tx_ring_size;
4737 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004738
4739 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004740 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004741 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004742
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004743 err = register_netdev(ndev);
4744 if (err) {
4745 dev_err(&pdev->dev, "net device registration failed.\n");
4746 ql_release_all(pdev);
4747 pci_disable_device(pdev);
4748 return err;
4749 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004750 /* Start up the timer to trigger EEH if
4751 * the bus goes dead
4752 */
4753 init_timer_deferrable(&qdev->timer);
4754 qdev->timer.data = (unsigned long)qdev;
4755 qdev->timer.function = ql_timer;
4756 qdev->timer.expires = jiffies + (5*HZ);
4757 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004758 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004759 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004760 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004761 cards_found++;
4762 return 0;
4763}
4764
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004765netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4766{
4767 return qlge_send(skb, ndev);
4768}
4769
4770int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4771{
4772 return ql_clean_inbound_rx_ring(rx_ring, budget);
4773}
4774
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004775static void __devexit qlge_remove(struct pci_dev *pdev)
4776{
4777 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004778 struct ql_adapter *qdev = netdev_priv(ndev);
4779 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004780 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004781 unregister_netdev(ndev);
4782 ql_release_all(pdev);
4783 pci_disable_device(pdev);
4784 free_netdev(ndev);
4785}
4786
Ron Mercer6d190c62009-10-28 08:39:20 +00004787/* Clean up resources without touching hardware. */
4788static void ql_eeh_close(struct net_device *ndev)
4789{
4790 int i;
4791 struct ql_adapter *qdev = netdev_priv(ndev);
4792
4793 if (netif_carrier_ok(ndev)) {
4794 netif_carrier_off(ndev);
4795 netif_stop_queue(ndev);
4796 }
4797
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004798 /* Disabling the timer */
4799 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004800 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004801
4802 for (i = 0; i < qdev->rss_ring_count; i++)
4803 netif_napi_del(&qdev->rx_ring[i].napi);
4804
4805 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4806 ql_tx_ring_clean(qdev);
4807 ql_free_rx_buffers(qdev);
4808 ql_release_adapter_resources(qdev);
4809}
4810
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004811/*
4812 * This callback is called by the PCI subsystem whenever
4813 * a PCI bus error is detected.
4814 */
4815static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4816 enum pci_channel_state state)
4817{
4818 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004819 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004820
Ron Mercer6d190c62009-10-28 08:39:20 +00004821 switch (state) {
4822 case pci_channel_io_normal:
4823 return PCI_ERS_RESULT_CAN_RECOVER;
4824 case pci_channel_io_frozen:
4825 netif_device_detach(ndev);
4826 if (netif_running(ndev))
4827 ql_eeh_close(ndev);
4828 pci_disable_device(pdev);
4829 return PCI_ERS_RESULT_NEED_RESET;
4830 case pci_channel_io_perm_failure:
4831 dev_err(&pdev->dev,
4832 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004833 ql_eeh_close(ndev);
4834 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004835 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004836 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004837
4838 /* Request a slot reset. */
4839 return PCI_ERS_RESULT_NEED_RESET;
4840}
4841
4842/*
4843 * This callback is called after the PCI buss has been reset.
4844 * Basically, this tries to restart the card from scratch.
4845 * This is a shortened version of the device probe/discovery code,
4846 * it resembles the first-half of the () routine.
4847 */
4848static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4849{
4850 struct net_device *ndev = pci_get_drvdata(pdev);
4851 struct ql_adapter *qdev = netdev_priv(ndev);
4852
Ron Mercer6d190c62009-10-28 08:39:20 +00004853 pdev->error_state = pci_channel_io_normal;
4854
4855 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004856 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004857 netif_err(qdev, ifup, qdev->ndev,
4858 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004859 return PCI_ERS_RESULT_DISCONNECT;
4860 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004861 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004862
4863 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004864 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004865 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004866 return PCI_ERS_RESULT_DISCONNECT;
4867 }
4868
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004869 return PCI_ERS_RESULT_RECOVERED;
4870}
4871
4872static void qlge_io_resume(struct pci_dev *pdev)
4873{
4874 struct net_device *ndev = pci_get_drvdata(pdev);
4875 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004876 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004877
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004878 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004879 err = qlge_open(ndev);
4880 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004881 netif_err(qdev, ifup, qdev->ndev,
4882 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004883 return;
4884 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004885 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004886 netif_err(qdev, ifup, qdev->ndev,
4887 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004888 }
Breno Leitao72046d82010-07-01 03:00:17 +00004889 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004890 netif_device_attach(ndev);
4891}
4892
4893static struct pci_error_handlers qlge_err_handler = {
4894 .error_detected = qlge_io_error_detected,
4895 .slot_reset = qlge_io_slot_reset,
4896 .resume = qlge_io_resume,
4897};
4898
4899static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4900{
4901 struct net_device *ndev = pci_get_drvdata(pdev);
4902 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004903 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004904
4905 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004906 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004907
4908 if (netif_running(ndev)) {
4909 err = ql_adapter_down(qdev);
4910 if (!err)
4911 return err;
4912 }
4913
Ron Mercerbc083ce2009-10-21 11:07:40 +00004914 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004915 err = pci_save_state(pdev);
4916 if (err)
4917 return err;
4918
4919 pci_disable_device(pdev);
4920
4921 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4922
4923 return 0;
4924}
4925
David S. Miller04da2cf2008-09-19 16:14:24 -07004926#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004927static int qlge_resume(struct pci_dev *pdev)
4928{
4929 struct net_device *ndev = pci_get_drvdata(pdev);
4930 struct ql_adapter *qdev = netdev_priv(ndev);
4931 int err;
4932
4933 pci_set_power_state(pdev, PCI_D0);
4934 pci_restore_state(pdev);
4935 err = pci_enable_device(pdev);
4936 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004937 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004938 return err;
4939 }
4940 pci_set_master(pdev);
4941
4942 pci_enable_wake(pdev, PCI_D3hot, 0);
4943 pci_enable_wake(pdev, PCI_D3cold, 0);
4944
4945 if (netif_running(ndev)) {
4946 err = ql_adapter_up(qdev);
4947 if (err)
4948 return err;
4949 }
4950
Breno Leitao72046d82010-07-01 03:00:17 +00004951 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004952 netif_device_attach(ndev);
4953
4954 return 0;
4955}
David S. Miller04da2cf2008-09-19 16:14:24 -07004956#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004957
4958static void qlge_shutdown(struct pci_dev *pdev)
4959{
4960 qlge_suspend(pdev, PMSG_SUSPEND);
4961}
4962
4963static struct pci_driver qlge_driver = {
4964 .name = DRV_NAME,
4965 .id_table = qlge_pci_tbl,
4966 .probe = qlge_probe,
4967 .remove = __devexit_p(qlge_remove),
4968#ifdef CONFIG_PM
4969 .suspend = qlge_suspend,
4970 .resume = qlge_resume,
4971#endif
4972 .shutdown = qlge_shutdown,
4973 .err_handler = &qlge_err_handler
4974};
4975
4976static int __init qlge_init_module(void)
4977{
4978 return pci_register_driver(&qlge_driver);
4979}
4980
4981static void __exit qlge_exit(void)
4982{
4983 pci_unregister_driver(&qlge_driver);
4984}
4985
4986module_init(qlge_init_module);
4987module_exit(qlge_exit);