/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

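/* Typical acquire/release pattern used by callers in this file
 * (see ql_set_mac_addr() below for a complete example):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	...access the shared resource...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 *
 * Note that ql_sem_trylock() returns 0 on success, so
 * ql_sem_spinlock() polls it up to 30 times at 100us intervals
 * before giving up with -ETIMEDOUT.
 */
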
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

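/* CAM and multicast entries span multiple consecutive 32-bit data
 * words, which is why the low "offset" bits of MAC_ADDR_IDX are
 * bumped once per word above and below while the entry index and
 * type bits stay fixed.
 */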
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

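/* Flash contents are validated two ways below: the first four bytes
 * must match the expected signature string, and the 16-bit additive
 * checksum over the whole image must come out to zero.
 */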
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return	status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

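/* Receive buffers come in two flavors: the small buffer queue (sbq)
 * holds individually mapped skbs, while the large buffer queue (lbq)
 * hands out chunks carved from a compound "master" page of
 * PAGE_SIZE << lbq_buf_order bytes (see ql_get_next_chunk() below).
 */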
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
					rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

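/* Each descriptor in the large buffer queue borrows a chunk of the
 * ring's current master page.  get_page() keeps the page alive until
 * its last chunk is handed out, and the DMA mapping is torn down in
 * ql_get_curr_lchunk() above once the final chunk has been consumed.
 */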
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

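/* Both refill paths above work in batches of 16 descriptors and only
 * ring the producer-index doorbell once at least one full batch has
 * been posted, keeping MMIO writes off the per-buffer fast path.
 */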
1297/* Unmaps tx buffers. Can be called from send() if a pci mapping
1298 * fails at some stage, or from the interrupt when a tx completes.
1299 */
1300static void ql_unmap_send(struct ql_adapter *qdev,
1301 struct tx_ring_desc *tx_ring_desc, int mapped)
1302{
1303 int i;
1304 for (i = 0; i < mapped; i++) {
1305 if (i == 0 || (i == 7 && mapped > 7)) {
1306 /*
1307 * Unmap the skb->data area, or the
1308 * external sglist (AKA the Outbound
1309 * Address List (OAL)).
1310 * If its the zeroeth element, then it's
1311 * the skb->data area. If it's the 7th
1312 * element and there is more than 6 frags,
1313 * then its an OAL.
1314 */
1315 if (i == 7) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001316 netif_printk(qdev, tx_done, KERN_DEBUG,
1317 qdev->ndev,
1318 "unmapping OAL area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001319 }
1320 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001321 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001322 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001323 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001324 maplen),
1325 PCI_DMA_TODEVICE);
1326 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001327 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1328 "unmapping frag %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001329 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001330 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001331 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001332 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001333 maplen), PCI_DMA_TODEVICE);
1334 }
1335 }
1336
1337}
1338
1339/* Map the buffers for this transmit. This will return
1340 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1341 */
1342static int ql_map_send(struct ql_adapter *qdev,
1343 struct ob_mac_iocb_req *mac_iocb_ptr,
1344 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1345{
1346 int len = skb_headlen(skb);
1347 dma_addr_t map;
1348 int frag_idx, err, map_idx = 0;
1349 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1350 int frag_cnt = skb_shinfo(skb)->nr_frags;
1351
1352 if (frag_cnt) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001353 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1354 "frag_cnt = %d.\n", frag_cnt);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001355 }
1356 /*
1357 * Map the skb buffer first.
1358 */
1359 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1360
1361 err = pci_dma_mapping_error(qdev->pdev, map);
1362 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001363 netif_err(qdev, tx_queued, qdev->ndev,
1364 "PCI mapping failed with error: %d\n", err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001365
1366 return NETDEV_TX_BUSY;
1367 }
1368
1369 tbd->len = cpu_to_le32(len);
1370 tbd->addr = cpu_to_le64(map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001371 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1372 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001373 map_idx++;
1374
1375 /*
1376 * This loop fills the remainder of the 8 address descriptors
1377 * in the IOCB. If there are more than 7 fragments, then the
1378 * eighth address desc will point to an external list (OAL).
1379 * When this happens, the remainder of the frags will be stored
1380 * in this list.
1381 */
1382 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1383 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1384 tbd++;
1385 if (frag_idx == 6 && frag_cnt > 7) {
1386 /* Let's tack on an sglist.
1387 * Our control block will now
1388 * look like this:
1389 * iocb->seg[0] = skb->data
1390 * iocb->seg[1] = frag[0]
1391 * iocb->seg[2] = frag[1]
1392 * iocb->seg[3] = frag[2]
1393 * iocb->seg[4] = frag[3]
1394 * iocb->seg[5] = frag[4]
1395 * iocb->seg[6] = frag[5]
1396 * iocb->seg[7] = ptr to OAL (external sglist)
1397 * oal->seg[0] = frag[6]
1398 * oal->seg[1] = frag[7]
1399 * oal->seg[2] = frag[8]
1400 * oal->seg[3] = frag[9]
1401 * oal->seg[4] = frag[10]
1402 * etc...
1403 */
1404 /* Tack on the OAL in the eighth segment of IOCB. */
1405 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1406 sizeof(struct oal),
1407 PCI_DMA_TODEVICE);
1408 err = pci_dma_mapping_error(qdev->pdev, map);
1409 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001410 netif_err(qdev, tx_queued, qdev->ndev,
1411 "PCI mapping outbound address list with error: %d\n",
1412 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001413 goto map_error;
1414 }
1415
1416 tbd->addr = cpu_to_le64(map);
1417 /*
1418 * The length is the number of fragments
1419 * that remain to be mapped times the length
1420 * of our sglist (OAL).
1421 */
1422 tbd->len =
1423 cpu_to_le32((sizeof(struct tx_buf_desc) *
1424 (frag_cnt - frag_idx)) | TX_DESC_C);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001425 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001426 map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001427 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001428 sizeof(struct oal));
1429 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1430 map_idx++;
1431 }
1432
1433 map =
1434 pci_map_page(qdev->pdev, frag->page,
1435 frag->page_offset, frag->size,
1436 PCI_DMA_TODEVICE);
1437
1438 err = pci_dma_mapping_error(qdev->pdev, map);
1439 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001440 netif_err(qdev, tx_queued, qdev->ndev,
1441 "PCI mapping frags failed with error: %d.\n",
1442 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001443 goto map_error;
1444 }
1445
1446 tbd->addr = cpu_to_le64(map);
1447 tbd->len = cpu_to_le32(frag->size);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001448 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1449 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001450 frag->size);
1451
1452 }
1453 /* Save the number of segments we've mapped. */
1454 tx_ring_desc->map_cnt = map_idx;
1455 /* Terminate the last segment. */
1456 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1457 return NETDEV_TX_OK;
1458
1459map_error:
1460 /*
1461 * If the first frag mapping failed, then i will be zero.
1462 * This causes the unmap of the skb->data area. Otherwise
1463 * we pass in the number of frags that mapped successfully
1464 * so they can be umapped.
1465 */
1466 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1467 return NETDEV_TX_BUSY;
1468}
1469
Ron Mercer4f848c02010-01-02 10:37:43 +00001470/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001471static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1472 struct rx_ring *rx_ring,
1473 struct ib_mac_iocb_rsp *ib_mac_rsp,
1474 u32 length,
1475 u16 vlan_id)
1476{
1477 struct sk_buff *skb;
1478 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1479 struct skb_frag_struct *rx_frag;
1480 int nr_frags;
1481 struct napi_struct *napi = &rx_ring->napi;
1482
1483 napi->dev = qdev->ndev;
1484
1485 skb = napi_get_frags(napi);
1486 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001487 netif_err(qdev, drv, qdev->ndev,
1488 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001489 rx_ring->rx_dropped++;
1490 put_page(lbq_desc->p.pg_chunk.page);
1491 return;
1492 }
1493 prefetch(lbq_desc->p.pg_chunk.va);
1494 rx_frag = skb_shinfo(skb)->frags;
1495 nr_frags = skb_shinfo(skb)->nr_frags;
1496 rx_frag += nr_frags;
1497 rx_frag->page = lbq_desc->p.pg_chunk.page;
1498 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1499 rx_frag->size = length;
1500
1501 skb->len += length;
1502 skb->data_len += length;
1503 skb->truesize += length;
1504 skb_shinfo(skb)->nr_frags++;
1505
1506 rx_ring->rx_packets++;
1507 rx_ring->rx_bytes += length;
1508 skb->ip_summed = CHECKSUM_UNNECESSARY;
1509 skb_record_rx_queue(skb, rx_ring->cq_id);
1510 if (qdev->vlgrp && (vlan_id != 0xffff))
1511 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1512 else
1513 napi_gro_frags(napi);
1514}
1515
1516/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001517static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1518 struct rx_ring *rx_ring,
1519 struct ib_mac_iocb_rsp *ib_mac_rsp,
1520 u32 length,
1521 u16 vlan_id)
1522{
1523 struct net_device *ndev = qdev->ndev;
1524 struct sk_buff *skb = NULL;
1525 void *addr;
1526 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527 struct napi_struct *napi = &rx_ring->napi;
1528
1529 skb = netdev_alloc_skb(ndev, length);
1530 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001531 netif_err(qdev, drv, qdev->ndev,
1532 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001533 rx_ring->rx_dropped++;
1534 put_page(lbq_desc->p.pg_chunk.page);
1535 return;
1536 }
1537
1538 addr = lbq_desc->p.pg_chunk.va;
1539 prefetch(addr);
1540
1541
1542 /* Frame error, so drop the packet. */
1543 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001544 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001545 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001546 rx_ring->rx_errors++;
1547 goto err_out;
1548 }
1549
1550 /* The max framesize filter on this chip is set higher than
1551 * MTU since FCoE uses 2k frames.
1552 */
1553	if (length > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001554 netif_err(qdev, drv, qdev->ndev,
1555	 "Frame too long, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001556 rx_ring->rx_dropped++;
1557 goto err_out;
1558 }
1559 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001560 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1561 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1562 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001563 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1564 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1565 length-ETH_HLEN);
1566 skb->len += length-ETH_HLEN;
1567 skb->data_len += length-ETH_HLEN;
1568 skb->truesize += length-ETH_HLEN;
1569
1570 rx_ring->rx_packets++;
1571 rx_ring->rx_bytes += skb->len;
1572 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001573 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001574
Michał Mirosław88230fd2011-04-18 13:31:21 +00001575 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001576 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1577 /* TCP frame. */
1578 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001579 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1580 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001581 skb->ip_summed = CHECKSUM_UNNECESSARY;
1582 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1583 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1584 /* Unfragmented ipv4 UDP frame. */
1585 struct iphdr *iph = (struct iphdr *) skb->data;
1586 if (!(iph->frag_off &
1587 cpu_to_be16(IP_MF|IP_OFFSET))) {
1588 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001589 netif_printk(qdev, rx_status, KERN_DEBUG,
1590 qdev->ndev,
1591	 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001592 }
1593 }
1594 }
1595
1596 skb_record_rx_queue(skb, rx_ring->cq_id);
1597 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1598 if (qdev->vlgrp && (vlan_id != 0xffff))
1599 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1600 else
1601 napi_gro_receive(napi, skb);
1602 } else {
1603 if (qdev->vlgrp && (vlan_id != 0xffff))
1604 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1605 else
1606 netif_receive_skb(skb);
1607 }
1608 return;
1609err_out:
1610 dev_kfree_skb_any(skb);
1611 put_page(lbq_desc->p.pg_chunk.page);
1612}
1613
1614/* Process an inbound completion from an rx ring. */
1615static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1616 struct rx_ring *rx_ring,
1617 struct ib_mac_iocb_rsp *ib_mac_rsp,
1618 u32 length,
1619 u16 vlan_id)
1620{
1621 struct net_device *ndev = qdev->ndev;
1622 struct sk_buff *skb = NULL;
1623 struct sk_buff *new_skb = NULL;
1624 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1625
1626 skb = sbq_desc->p.skb;
1627 /* Allocate new_skb and copy */
1628 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1629 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001630 netif_err(qdev, probe, qdev->ndev,
1631 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001632 rx_ring->rx_dropped++;
1633 return;
1634 }
1635 skb_reserve(new_skb, NET_IP_ALIGN);
1636 memcpy(skb_put(new_skb, length), skb->data, length);
1637 skb = new_skb;
1638
1639 /* Frame error, so drop the packet. */
1640 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001641 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001642 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001643 dev_kfree_skb_any(skb);
1644 rx_ring->rx_errors++;
1645 return;
1646 }
1647
1648 /* loopback self test for ethtool */
1649 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1650 ql_check_lb_frame(qdev, skb);
1651 dev_kfree_skb_any(skb);
1652 return;
1653 }
1654
1655 /* The max framesize filter on this chip is set higher than
1656 * MTU since FCoE uses 2k frames.
1657 */
1658 if (skb->len > ndev->mtu + ETH_HLEN) {
1659 dev_kfree_skb_any(skb);
1660 rx_ring->rx_dropped++;
1661 return;
1662 }
1663
1664 prefetch(skb->data);
1665 skb->dev = ndev;
1666 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001667 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668 "%s Multicast.\n",
1669 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1671 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1672 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1673 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1674 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001675 }
1676 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001677 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1678 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001679
1680 rx_ring->rx_packets++;
1681 rx_ring->rx_bytes += skb->len;
1682 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001683 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001684
1685 /* If rx checksum is on, and there are no
1686 * csum or frame errors.
1687 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001688 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001689 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1690 /* TCP frame. */
1691 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001692 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1693 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001694 skb->ip_summed = CHECKSUM_UNNECESSARY;
1695 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1696 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1697 /* Unfragmented ipv4 UDP frame. */
1698 struct iphdr *iph = (struct iphdr *) skb->data;
1699 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001700 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001701 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001702 netif_printk(qdev, rx_status, KERN_DEBUG,
1703 qdev->ndev,
1704	 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001705 }
1706 }
1707 }
1708
1709 skb_record_rx_queue(skb, rx_ring->cq_id);
1710 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1711 if (qdev->vlgrp && (vlan_id != 0xffff))
1712 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1713 vlan_id, skb);
1714 else
1715 napi_gro_receive(&rx_ring->napi, skb);
1716 } else {
1717 if (qdev->vlgrp && (vlan_id != 0xffff))
1718 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1719 else
1720 netif_receive_skb(skb);
1721 }
1722}
1723
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001724static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001725{
1726 void *temp_addr = skb->data;
1727
1728 /* Undo the skb_reserve(skb,32) we did before
1729 * giving to hardware, and realign data on
1730 * a 2-byte boundary.
1731 */
1732 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1733 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1734 skb_copy_to_linear_data(skb, temp_addr,
1735 (unsigned int)len);
1736}
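
/* Worked example of the arithmetic above, assuming QLGE_SB_PAD is 32
 * and NET_IP_ALIGN is 2: the small buffer was posted to hardware with
 * a 32-byte headroom, so moving data and tail back by 32 - 2 = 30
 * bytes leaves exactly a 2-byte pad, which 4-byte-aligns the IP header
 * that follows the 14-byte Ethernet header.  Hypothetical helper
 * naming that constant:
 */
static inline int ql_realign_shift(void)
{
	return QLGE_SB_PAD - NET_IP_ALIGN;	/* 30 bytes in this config */
}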
1737
1738/*
1739 * This function builds an skb for the given inbound
1740 * completion. It will be rewritten for readability in the near
1741	 * future, but for now it works well.
1742 */
1743static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1744 struct rx_ring *rx_ring,
1745 struct ib_mac_iocb_rsp *ib_mac_rsp)
1746{
1747 struct bq_desc *lbq_desc;
1748 struct bq_desc *sbq_desc;
1749 struct sk_buff *skb = NULL;
1750 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1751 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1752
1753 /*
1754 * Handle the header buffer if present.
1755 */
1756 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1757 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001758 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1759 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001760 /*
1761 * Headers fit nicely into a small buffer.
1762 */
1763 sbq_desc = ql_get_curr_sbuf(rx_ring);
1764 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001765 dma_unmap_addr(sbq_desc, mapaddr),
1766 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001767 PCI_DMA_FROMDEVICE);
1768 skb = sbq_desc->p.skb;
1769 ql_realign_skb(skb, hdr_len);
1770 skb_put(skb, hdr_len);
1771 sbq_desc->p.skb = NULL;
1772 }
1773
1774 /*
1775 * Handle the data buffer(s).
1776 */
1777 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001778 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1779 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001780 return skb;
1781 }
1782
1783 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1784 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001785 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786 "Headers in small, data of %d bytes in small, combine them.\n",
1787 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001788 /*
1789 * Data is less than small buffer size so it's
1790 * stuffed in a small buffer.
1791 * For this case we append the data
1792 * from the "data" small buffer to the "header" small
1793 * buffer.
1794 */
1795 sbq_desc = ql_get_curr_sbuf(rx_ring);
1796 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001797 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001798 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001799 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001800 (sbq_desc, maplen),
1801 PCI_DMA_FROMDEVICE);
1802 memcpy(skb_put(skb, length),
1803 sbq_desc->p.skb->data, length);
1804 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001805 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001806 (sbq_desc,
1807 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001808 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001809 (sbq_desc,
1810 maplen),
1811 PCI_DMA_FROMDEVICE);
1812 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001813 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 "%d bytes in a single small buffer.\n",
1815 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 sbq_desc = ql_get_curr_sbuf(rx_ring);
1817 skb = sbq_desc->p.skb;
1818 ql_realign_skb(skb, length);
1819 skb_put(skb, length);
1820 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001821 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001822 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001823 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001824 maplen),
1825 PCI_DMA_FROMDEVICE);
1826 sbq_desc->p.skb = NULL;
1827 }
1828 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1829 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001830 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831 "Header in small, %d bytes in large. Chain large to small!\n",
1832 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001833 /*
1834 * The data is in a single large buffer. We
1835 * chain it to the header buffer's skb and let
1836 * it rip.
1837 */
Ron Mercer7c734352009-10-19 03:32:19 +00001838 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001839 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1840 "Chaining page at offset = %d, for %d bytes to skb.\n",
1841 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001842 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1843 lbq_desc->p.pg_chunk.offset,
1844 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001845 skb->len += length;
1846 skb->data_len += length;
1847 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001848 } else {
1849 /*
1850 * The headers and data are in a single large buffer. We
1851 * copy it to a new skb and let it go. This can happen with
1852 * jumbo mtu on a non-TCP/UDP frame.
1853 */
Ron Mercer7c734352009-10-19 03:32:19 +00001854 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001855 skb = netdev_alloc_skb(qdev->ndev, length);
1856 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001857 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1858 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001859 return NULL;
1860 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001861 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001862 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001863 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001864 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001865 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001866 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001867 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1868 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1869 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001870 skb_fill_page_desc(skb, 0,
1871 lbq_desc->p.pg_chunk.page,
1872 lbq_desc->p.pg_chunk.offset,
1873 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001874 skb->len += length;
1875 skb->data_len += length;
1876 skb->truesize += length;
1877 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001878 __pskb_pull_tail(skb,
1879 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1880 VLAN_ETH_HLEN : ETH_HLEN);
1881 }
1882 } else {
1883 /*
1884 * The data is in a chain of large buffers
1885 * pointed to by a small buffer. We loop
1886	 * through and chain them to our small header
1887 * buffer's skb.
1888 * frags: There are 18 max frags and our small
1889	 * buffer will hold 32 of them. In practice
1890	 * we use at most 3 for our 9000 byte jumbo
1891	 * frames, but if the MTU goes up we could
1892 * eventually be in trouble.
1893 */
Ron Mercer7c734352009-10-19 03:32:19 +00001894 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001895 sbq_desc = ql_get_curr_sbuf(rx_ring);
1896 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001897 dma_unmap_addr(sbq_desc, mapaddr),
1898 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001899 PCI_DMA_FROMDEVICE);
1900 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1901 /*
1902	 * This is a non-TCP/UDP IP frame, so
1903 * the headers aren't split into a small
1904 * buffer. We have to use the small buffer
1905 * that contains our sg list as our skb to
1906	 * send upstream. Copy the sg list here to
1907 * a local buffer and use it to find the
1908 * pages to chain.
1909 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001910 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1911 "%d bytes of headers & data in chain of large.\n",
1912 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001913 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001914 sbq_desc->p.skb = NULL;
1915 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001916 }
1917 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001918 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1919 size = (length < rx_ring->lbq_buf_size) ? length :
1920 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001921
Joe Perchesae9540f72010-02-09 11:49:52 +00001922 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1923 "Adding page %d to skb for %d bytes.\n",
1924 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001925 skb_fill_page_desc(skb, i,
1926 lbq_desc->p.pg_chunk.page,
1927 lbq_desc->p.pg_chunk.offset,
1928 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929 skb->len += size;
1930 skb->data_len += size;
1931 skb->truesize += size;
1932 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001933 i++;
1934 }
1935 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1936 VLAN_ETH_HLEN : ETH_HLEN);
1937 }
1938 return skb;
1939}
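
/* Minimal sketch (hypothetical helper) of the small-buffer copy used
 * in ql_build_rx_skb() above: sync the buffer to the CPU, copy the
 * payload, then hand it back to the device so the slot can be reused
 * without a fresh mapping.
 */
static void ql_copy_from_sbuf(struct ql_adapter *qdev, struct sk_buff *skb,
			      struct bq_desc *sbq_desc, u32 length)
{
	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(sbq_desc, mapaddr),
				    dma_unmap_len(sbq_desc, maplen),
				    PCI_DMA_FROMDEVICE);
	memcpy(skb_put(skb, length), sbq_desc->p.skb->data, length);
	pci_dma_sync_single_for_device(qdev->pdev,
				       dma_unmap_addr(sbq_desc, mapaddr),
				       dma_unmap_len(sbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
}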
1940
1941/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001942static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001943 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001944 struct ib_mac_iocb_rsp *ib_mac_rsp,
1945 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001946{
1947 struct net_device *ndev = qdev->ndev;
1948 struct sk_buff *skb = NULL;
1949
1950 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1951
1952 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1953 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001954 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1955 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001956 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001957 return;
1958 }
1959
Ron Mercera32959c2009-06-09 05:39:27 +00001960 /* Frame error, so drop the packet. */
1961 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001962 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001963 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001964 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001965 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001966 return;
1967 }
Ron Mercerec33a492009-06-09 05:39:28 +00001968
1969 /* The max framesize filter on this chip is set higher than
1970 * MTU since FCoE uses 2k frames.
1971 */
1972 if (skb->len > ndev->mtu + ETH_HLEN) {
1973 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001974 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001975 return;
1976 }
1977
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001978 /* loopback self test for ethtool */
1979 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1980 ql_check_lb_frame(qdev, skb);
1981 dev_kfree_skb_any(skb);
1982 return;
1983 }
1984
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001985 prefetch(skb->data);
1986 skb->dev = ndev;
1987 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001988 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1989 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1990 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1991 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1992 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1993 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1994 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001995 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001996 }
1997 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001998 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1999 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002000 }
Ron Mercerd555f592009-03-09 10:59:19 +00002001
Ron Mercerd555f592009-03-09 10:59:19 +00002002 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002003 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00002004
2005 /* If rx checksum is on, and there are no
2006 * csum or frame errors.
2007 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00002008 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00002009 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2010 /* TCP frame. */
2011 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002012 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2013 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002014 skb->ip_summed = CHECKSUM_UNNECESSARY;
2015 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2016 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2017 /* Unfragmented ipv4 UDP frame. */
2018 struct iphdr *iph = (struct iphdr *) skb->data;
2019 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002020 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002021 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002022 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2023	 "UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002024 }
2025 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002026 }
Ron Mercerd555f592009-03-09 10:59:19 +00002027
Ron Mercer885ee392009-11-03 13:49:31 +00002028 rx_ring->rx_packets++;
2029 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002030 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002031 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2032 if (qdev->vlgrp &&
2033 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2034 (vlan_id != 0))
2035 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2036 vlan_id, skb);
2037 else
2038 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002039 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002040 if (qdev->vlgrp &&
2041 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2042 (vlan_id != 0))
2043 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2044 else
2045 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002046 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002047}
2048
Ron Mercer4f848c02010-01-02 10:37:43 +00002049/* Process an inbound completion from an rx ring. */
2050static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2051 struct rx_ring *rx_ring,
2052 struct ib_mac_iocb_rsp *ib_mac_rsp)
2053{
2054 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2055 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2056 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2057 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2058
2059 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2060
2061 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2062 /* The data and headers are split into
2063 * separate buffers.
2064 */
2065 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 vlan_id);
2067 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2068 /* The data fit in a single small buffer.
2069 * Allocate a new skb, copy the data and
2070 * return the buffer to the free pool.
2071 */
2072 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2073 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002074 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2075 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2076 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2077 /* TCP packet in a page chunk that's been checksummed.
2078 * Tack it on to our GRO skb and let it go.
2079 */
2080 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2081 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002082 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2083 /* Non-TCP packet in a page chunk. Allocate an
2084 * skb, tack it on frags, and send it up.
2085 */
2086 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2087 length, vlan_id);
2088 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002089 /* Non-TCP/UDP large frames that span multiple buffers
2090	 * can be processed correctly by the split frame logic.
2091 */
2092 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2093 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002094 }
2095
2096 return (unsigned long)length;
2097}
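
/* Sketch of the vlan-id extraction done at the top of
 * ql_process_mac_rx_intr() above: the id is only meaningful when
 * IB_MAC_IOCB_RSP_V is set, and 0xffff serves as the "no vlan"
 * sentinel checked by the rx handlers.  Hypothetical helper.
 */
static inline u16 ql_rx_vlan_id(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	if (!(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V))
		return 0xffff;
	return le16_to_cpu(ib_mac_rsp->vlan_id) & IB_MAC_IOCB_RSP_VLAN_MASK;
}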
2098
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002099/* Process an outbound completion from an rx ring. */
2100static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2101 struct ob_mac_iocb_rsp *mac_rsp)
2102{
2103 struct tx_ring *tx_ring;
2104 struct tx_ring_desc *tx_ring_desc;
2105
2106 QL_DUMP_OB_MAC_RSP(mac_rsp);
2107 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2108 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2109 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002110 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2111 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002112 dev_kfree_skb(tx_ring_desc->skb);
2113 tx_ring_desc->skb = NULL;
2114
2115 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2116 OB_MAC_IOCB_RSP_S |
2117 OB_MAC_IOCB_RSP_L |
2118 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2119 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002120 netif_warn(qdev, tx_done, qdev->ndev,
2121 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 }
2123 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002124 netif_warn(qdev, tx_done, qdev->ndev,
2125 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002126 }
2127 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002128 netif_warn(qdev, tx_done, qdev->ndev,
2129 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002130 }
2131 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002132 netif_warn(qdev, tx_done, qdev->ndev,
2133 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002134 }
2135 }
2136 atomic_inc(&tx_ring->tx_count);
2137}
2138
2139/* Fire up a handler to reset the MPI processor. */
2140void ql_queue_fw_error(struct ql_adapter *qdev)
2141{
Ron Mercer6a473302009-07-02 06:06:12 +00002142 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002143 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2144}
2145
2146void ql_queue_asic_error(struct ql_adapter *qdev)
2147{
Ron Mercer6a473302009-07-02 06:06:12 +00002148 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002149 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002150 /* Clear adapter up bit to signal the recovery
2151 * process that it shouldn't kill the reset worker
2152 * thread
2153 */
2154 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002155 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2156}
2157
2158static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2159 struct ib_ae_iocb_rsp *ib_ae_rsp)
2160{
2161 switch (ib_ae_rsp->event) {
2162 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002163 netif_err(qdev, rx_err, qdev->ndev,
2164 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002165 ql_queue_fw_error(qdev);
2166 return;
2167
2168 case CAM_LOOKUP_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002169 netif_err(qdev, link, qdev->ndev,
2170 "Multiple CAM hits lookup occurred.\n");
2171 netif_err(qdev, drv, qdev->ndev,
2172 "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002173 ql_queue_asic_error(qdev);
2174 return;
2175
2176 case SOFT_ECC_ERROR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002177 netif_err(qdev, rx_err, qdev->ndev,
2178 "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002179 ql_queue_asic_error(qdev);
2180 break;
2181
2182 case PCI_ERR_ANON_BUF_RD:
Joe Perchesae9540f72010-02-09 11:49:52 +00002183 netif_err(qdev, rx_err, qdev->ndev,
2184 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2185 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002186 ql_queue_asic_error(qdev);
2187 break;
2188
2189 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002190 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2191 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002192 ql_queue_asic_error(qdev);
2193 break;
2194 }
2195}
2196
2197static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2198{
2199 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002200 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002201 struct ob_mac_iocb_rsp *net_rsp = NULL;
2202 int count = 0;
2203
Ron Mercer1e213302009-03-09 10:59:21 +00002204 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002205 /* While there are entries in the completion queue. */
2206 while (prod != rx_ring->cnsmr_idx) {
2207
Joe Perchesae9540f72010-02-09 11:49:52 +00002208 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2209	 "cq_id = %d, prod = %d, cnsmr = %d.\n",
2210 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002211
2212 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2213 rmb();
2214 switch (net_rsp->opcode) {
2215
2216 case OPCODE_OB_MAC_TSO_IOCB:
2217 case OPCODE_OB_MAC_IOCB:
2218 ql_process_mac_tx_intr(qdev, net_rsp);
2219 break;
2220 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002221 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2222 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2223 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002224 }
2225 count++;
2226 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002227 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002228 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002229 if (!net_rsp)
2230 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002231 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002232 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002233 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002234 if (atomic_read(&tx_ring->queue_stopped) &&
2235 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2236 /*
2237 * The queue got stopped because the tx_ring was full.
2238 * Wake it up, because it's now at least 25% empty.
2239 */
Ron Mercer1e213302009-03-09 10:59:21 +00002240 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002241 }
2242
2243 return count;
2244}
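
/* Restatement of the wake heuristic above as a hypothetical helper:
 * a stopped TX queue is only restarted once at least a quarter of its
 * wq_len slots are free again, which keeps the queue from flapping
 * between stopped and awake under steady load.
 */
static inline int ql_tx_ring_may_wake(struct tx_ring *tx_ring)
{
	return atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4);
}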
2245
2246static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2247{
2248 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002249 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002250 struct ql_net_rsp_iocb *net_rsp;
2251 int count = 0;
2252
2253 /* While there are entries in the completion queue. */
2254 while (prod != rx_ring->cnsmr_idx) {
2255
Joe Perchesae9540f72010-02-09 11:49:52 +00002256 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2257	 "cq_id = %d, prod = %d, cnsmr = %d.\n",
2258 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002259
2260 net_rsp = rx_ring->curr_entry;
2261 rmb();
2262 switch (net_rsp->opcode) {
2263 case OPCODE_IB_MAC_IOCB:
2264 ql_process_mac_rx_intr(qdev, rx_ring,
2265 (struct ib_mac_iocb_rsp *)
2266 net_rsp);
2267 break;
2268
2269 case OPCODE_IB_AE_IOCB:
2270 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2271 net_rsp);
2272 break;
2273 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002274 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2275 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2276 net_rsp->opcode);
2277 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002278 }
2279 count++;
2280 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002281 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002282 if (count == budget)
2283 break;
2284 }
2285 ql_update_buffer_queues(qdev, rx_ring);
2286 ql_write_cq_idx(rx_ring);
2287 return count;
2288}
2289
2290static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2291{
2292 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2293 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002294 struct rx_ring *trx_ring;
2295 int i, work_done = 0;
2296 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002297
Joe Perchesae9540f72010-02-09 11:49:52 +00002298 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2299 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002300
Ron Mercer39aa8162009-08-27 11:02:11 +00002301 /* Service the TX rings first. They start
2302 * right after the RSS rings. */
2303 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2304 trx_ring = &qdev->rx_ring[i];
2305 /* If this TX completion ring belongs to this vector and
2306 * it's not empty then service it.
2307 */
2308 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2309 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2310 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002311 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2312 "%s: Servicing TX completion ring %d.\n",
2313 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002314 ql_clean_outbound_rx_ring(trx_ring);
2315 }
2316 }
2317
2318 /*
2319 * Now service the RSS ring if it's active.
2320 */
2321 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2322 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002323 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2324 "%s: Servicing RX completion ring %d.\n",
2325 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002326 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2327 }
2328
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002329 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002330 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002331 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2332 }
2333 return work_done;
2334}
2335
Ron Mercer01e6b952009-10-30 12:13:34 +00002336static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002337{
2338 struct ql_adapter *qdev = netdev_priv(ndev);
2339
2340 qdev->vlgrp = grp;
2341 if (grp) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002342 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2343 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002344 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2345 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2346 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00002347 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2348 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002349 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2350 }
2351}
2352
Ron Mercer01e6b952009-10-30 12:13:34 +00002353static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002354{
2355 struct ql_adapter *qdev = netdev_priv(ndev);
2356 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002357 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002358
Ron Mercercc288f52009-02-23 10:42:14 +00002359 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2360 if (status)
2361 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002362 if (ql_set_mac_addr_reg
2363 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002364 netif_err(qdev, ifup, qdev->ndev,
2365 "Failed to init vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002366 }
Ron Mercercc288f52009-02-23 10:42:14 +00002367 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002368}
2369
Ron Mercer01e6b952009-10-30 12:13:34 +00002370static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002371{
2372 struct ql_adapter *qdev = netdev_priv(ndev);
2373 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002374 int status;
2375
2376 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2377 if (status)
2378 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002379
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002380 if (ql_set_mac_addr_reg
2381 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002382 netif_err(qdev, ifup, qdev->ndev,
2383 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384 }
Ron Mercercc288f52009-02-23 10:42:14 +00002385 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002386
2387}
2388
Ron Mercerc1b60092010-10-27 04:58:12 +00002389static void qlge_restore_vlan(struct ql_adapter *qdev)
2390{
2391 qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
2392
2393 if (qdev->vlgrp) {
2394 u16 vid;
2395 for (vid = 0; vid < VLAN_N_VID; vid++) {
2396 if (!vlan_group_get_device(qdev->vlgrp, vid))
2397 continue;
2398 qlge_vlan_rx_add_vid(qdev->ndev, vid);
2399 }
2400 }
2401}
2402
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002403/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2404static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2405{
2406 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002407 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002408 return IRQ_HANDLED;
2409}
2410
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002411/* This handles a fatal error, MPI activity, and the default
2412 * rx_ring in an MSI-X multiple vector environment.
2413 * In MSI/Legacy environment it also process the rest of
2414 * the rx_rings.
2415 */
2416static irqreturn_t qlge_isr(int irq, void *dev_id)
2417{
2418 struct rx_ring *rx_ring = dev_id;
2419 struct ql_adapter *qdev = rx_ring->qdev;
2420 struct intr_context *intr_context = &qdev->intr_context[0];
2421 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002422 int work_done = 0;
2423
Ron Mercerbb0d2152008-10-20 10:30:26 -07002424 spin_lock(&qdev->hw_lock);
2425 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002426 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2427	 "Shared interrupt, not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002428 spin_unlock(&qdev->hw_lock);
2429 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002430 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002431 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002432
Ron Mercerbb0d2152008-10-20 10:30:26 -07002433 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002434
2435 /*
2436 * Check for fatal error.
2437 */
2438 if (var & STS_FE) {
2439 ql_queue_asic_error(qdev);
Joe Perchesae9540f72010-02-09 11:49:52 +00002440 netif_err(qdev, intr, qdev->ndev,
2441 "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002442 var = ql_read32(qdev, ERR_STS);
Joe Perchesae9540f72010-02-09 11:49:52 +00002443 netif_err(qdev, intr, qdev->ndev,
2444 "Resetting chip. Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002445 return IRQ_HANDLED;
2446 }
2447
2448 /*
2449 * Check MPI processor activity.
2450 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002451 if ((var & STS_PI) &&
2452 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002453 /*
2454 * We've got an async event or mailbox completion.
2455 * Handle it and clear the source of the interrupt.
2456 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002457 netif_err(qdev, intr, qdev->ndev,
2458 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002459 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002460 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2461 queue_delayed_work_on(smp_processor_id(),
2462 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002463 work_done++;
2464 }
2465
2466 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002467 * Get the bit-mask that shows the active queues for this
2468 * pass. Compare it to the queues that this irq services
2469 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002470 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002471 var = ql_read32(qdev, ISR1);
2472 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002473 netif_info(qdev, intr, qdev->ndev,
2474 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002475 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002476 napi_schedule(&rx_ring->napi);
2477 work_done++;
2478 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002479 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002480 return work_done ? IRQ_HANDLED : IRQ_NONE;
2481}
2482
2483static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2484{
2485
2486 if (skb_is_gso(skb)) {
2487 int err;
2488 if (skb_header_cloned(skb)) {
2489 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2490 if (err)
2491 return err;
2492 }
2493
2494 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2495 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2496 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2497 mac_iocb_ptr->total_hdrs_len =
2498 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2499 mac_iocb_ptr->net_trans_offset =
2500 cpu_to_le16(skb_network_offset(skb) |
2501 skb_transport_offset(skb)
2502 << OB_MAC_TRANSPORT_HDR_SHIFT);
2503 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2504 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2505 if (likely(skb->protocol == htons(ETH_P_IP))) {
2506 struct iphdr *iph = ip_hdr(skb);
2507 iph->check = 0;
2508 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2509 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2510 iph->daddr, 0,
2511 IPPROTO_TCP,
2512 0);
2513 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2514 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2515 tcp_hdr(skb)->check =
2516 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2517 &ipv6_hdr(skb)->daddr,
2518 0, IPPROTO_TCP, 0);
2519 }
2520 return 1;
2521 }
2522 return 0;
2523}
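
/* Minimal sketch of the IPv4 pseudo-header seeding done in ql_tso()
 * above: the chip checksums each segment's TCP payload, so software
 * must preload tcp->check with the folded pseudo-header sum computed
 * with a zero length field.  Hypothetical helper for illustration.
 */
static void ql_seed_tso_csum_v4(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->check = 0;		/* chip recomputes the IP checksum */
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
}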
2524
2525static void ql_hw_csum_setup(struct sk_buff *skb,
2526 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2527{
2528 int len;
2529 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002530 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002531 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2532 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2533 mac_iocb_ptr->net_trans_offset =
2534 cpu_to_le16(skb_network_offset(skb) |
2535 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2536
2537 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2538 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2539 if (likely(iph->protocol == IPPROTO_TCP)) {
2540 check = &(tcp_hdr(skb)->check);
2541 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2542 mac_iocb_ptr->total_hdrs_len =
2543 cpu_to_le16(skb_transport_offset(skb) +
2544 (tcp_hdr(skb)->doff << 2));
2545 } else {
2546 check = &(udp_hdr(skb)->check);
2547 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2548 mac_iocb_ptr->total_hdrs_len =
2549 cpu_to_le16(skb_transport_offset(skb) +
2550 sizeof(struct udphdr));
2551 }
2552 *check = ~csum_tcpudp_magic(iph->saddr,
2553 iph->daddr, len, iph->protocol, 0);
2554}
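
/* Sketch of the header-offset packing shared by ql_tso() and
 * ql_hw_csum_setup() above: the network-header offset sits in the low
 * bits and the transport-header offset is shifted up by
 * OB_MAC_TRANSPORT_HDR_SHIFT, so the chip can locate both headers from
 * a single little-endian word.  Hypothetical helper.
 */
static inline __le16 ql_pack_hdr_offsets(struct sk_buff *skb)
{
	return cpu_to_le16(skb_network_offset(skb) |
			   (skb_transport_offset(skb) <<
			    OB_MAC_TRANSPORT_HDR_SHIFT));
}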
2555
Stephen Hemminger613573252009-08-31 19:50:58 +00002556static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557{
2558 struct tx_ring_desc *tx_ring_desc;
2559 struct ob_mac_iocb_req *mac_iocb_ptr;
2560 struct ql_adapter *qdev = netdev_priv(ndev);
2561 int tso;
2562 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002563 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002564
2565 tx_ring = &qdev->tx_ring[tx_ring_idx];
2566
Ron Mercer74c50b42009-03-09 10:59:27 +00002567 if (skb_padto(skb, ETH_ZLEN))
2568 return NETDEV_TX_OK;
2569
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002570 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002571 netif_info(qdev, tx_queued, qdev->ndev,
2572	 "%s: shutting down tx queue %d due to lack of resources.\n",
2573 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002574 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002575 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002576 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002577 return NETDEV_TX_BUSY;
2578 }
2579 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2580 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002581 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002582
2583 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2584 mac_iocb_ptr->tid = tx_ring_desc->index;
2585 /* We use the upper 32-bits to store the tx queue for this IO.
2586 * When we get the completion we can use it to establish the context.
2587 */
2588 mac_iocb_ptr->txq_idx = tx_ring_idx;
2589 tx_ring_desc->skb = skb;
2590
2591 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2592
Jesse Grosseab6d182010-10-20 13:56:03 +00002593 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002594 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2595 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002596 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2597 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2598 }
2599 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2600 if (tso < 0) {
2601 dev_kfree_skb_any(skb);
2602 return NETDEV_TX_OK;
2603 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2604 ql_hw_csum_setup(skb,
2605 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2606 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002607 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2608 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002609 netif_err(qdev, tx_queued, qdev->ndev,
2610 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002611 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002612 return NETDEV_TX_BUSY;
2613 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002614 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2615 tx_ring->prod_idx++;
2616 if (tx_ring->prod_idx == tx_ring->wq_len)
2617 tx_ring->prod_idx = 0;
2618 wmb();
2619
2620 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002621 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2622 "tx queued, slot %d, len %d\n",
2623 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002624
2625 atomic_dec(&tx_ring->tx_count);
2626 return NETDEV_TX_OK;
2627}
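
/* Illustrative sketch of the producer/doorbell sequence at the end of
 * qlge_send() above: advance and wrap the producer index, then make
 * sure the IOCB write is globally visible (wmb) before the doorbell
 * tells the chip to fetch it.  Hypothetical helper mirroring the code.
 */
static void ql_tx_ring_doorbell(struct tx_ring *tx_ring)
{
	if (++tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();	/* IOCB must land in memory before the doorbell write */
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}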
2628
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002629
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002630static void ql_free_shadow_space(struct ql_adapter *qdev)
2631{
2632 if (qdev->rx_ring_shadow_reg_area) {
2633 pci_free_consistent(qdev->pdev,
2634 PAGE_SIZE,
2635 qdev->rx_ring_shadow_reg_area,
2636 qdev->rx_ring_shadow_reg_dma);
2637 qdev->rx_ring_shadow_reg_area = NULL;
2638 }
2639 if (qdev->tx_ring_shadow_reg_area) {
2640 pci_free_consistent(qdev->pdev,
2641 PAGE_SIZE,
2642 qdev->tx_ring_shadow_reg_area,
2643 qdev->tx_ring_shadow_reg_dma);
2644 qdev->tx_ring_shadow_reg_area = NULL;
2645 }
2646}
2647
2648static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2649{
2650 qdev->rx_ring_shadow_reg_area =
2651 pci_alloc_consistent(qdev->pdev,
2652 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2653 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002654 netif_err(qdev, ifup, qdev->ndev,
2655 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002656 return -ENOMEM;
2657 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002658 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002659 qdev->tx_ring_shadow_reg_area =
2660 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2661 &qdev->tx_ring_shadow_reg_dma);
2662 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002663 netif_err(qdev, ifup, qdev->ndev,
2664 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002665 goto err_wqp_sh_area;
2666 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002667 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002668 return 0;
2669
2670err_wqp_sh_area:
2671 pci_free_consistent(qdev->pdev,
2672 PAGE_SIZE,
2673 qdev->rx_ring_shadow_reg_area,
2674 qdev->rx_ring_shadow_reg_dma);
2675 return -ENOMEM;
2676}
2677
2678static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2679{
2680 struct tx_ring_desc *tx_ring_desc;
2681 int i;
2682 struct ob_mac_iocb_req *mac_iocb_ptr;
2683
2684 mac_iocb_ptr = tx_ring->wq_base;
2685 tx_ring_desc = tx_ring->q;
2686 for (i = 0; i < tx_ring->wq_len; i++) {
2687 tx_ring_desc->index = i;
2688 tx_ring_desc->skb = NULL;
2689 tx_ring_desc->queue_entry = mac_iocb_ptr;
2690 mac_iocb_ptr++;
2691 tx_ring_desc++;
2692 }
2693 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2694 atomic_set(&tx_ring->queue_stopped, 0);
2695}
2696
2697static void ql_free_tx_resources(struct ql_adapter *qdev,
2698 struct tx_ring *tx_ring)
2699{
2700 if (tx_ring->wq_base) {
2701 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2702 tx_ring->wq_base, tx_ring->wq_base_dma);
2703 tx_ring->wq_base = NULL;
2704 }
2705 kfree(tx_ring->q);
2706 tx_ring->q = NULL;
2707}
2708
2709static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2710 struct tx_ring *tx_ring)
2711{
2712 tx_ring->wq_base =
2713 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2714 &tx_ring->wq_base_dma);
2715
Joe Perches8e95a202009-12-03 07:58:21 +00002716 if ((tx_ring->wq_base == NULL) ||
2717 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002718 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002719 return -ENOMEM;
2720 }
2721 tx_ring->q =
2722 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2723 if (tx_ring->q == NULL)
2724 goto err;
2725
2726 return 0;
2727err:
2728 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2729 tx_ring->wq_base, tx_ring->wq_base_dma);
2730 return -ENOMEM;
2731}
2732
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002733static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002734{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002735 struct bq_desc *lbq_desc;
2736
Ron Mercer7c734352009-10-19 03:32:19 +00002737 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002738
Ron Mercer7c734352009-10-19 03:32:19 +00002739 curr_idx = rx_ring->lbq_curr_idx;
2740 clean_idx = rx_ring->lbq_clean_idx;
2741 while (curr_idx != clean_idx) {
2742 lbq_desc = &rx_ring->lbq[curr_idx];
2743
2744 if (lbq_desc->p.pg_chunk.last_flag) {
2745 pci_unmap_page(qdev->pdev,
2746 lbq_desc->p.pg_chunk.map,
2747 ql_lbq_block_size(qdev),
2748 PCI_DMA_FROMDEVICE);
2749 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002750 }
Ron Mercer7c734352009-10-19 03:32:19 +00002751
2752 put_page(lbq_desc->p.pg_chunk.page);
2753 lbq_desc->p.pg_chunk.page = NULL;
2754
2755 if (++curr_idx == rx_ring->lbq_len)
2756 curr_idx = 0;
2757
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002758 }
2759}
2760
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002761static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002762{
2763 int i;
2764 struct bq_desc *sbq_desc;
2765
2766 for (i = 0; i < rx_ring->sbq_len; i++) {
2767 sbq_desc = &rx_ring->sbq[i];
2768 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002769 netif_err(qdev, ifup, qdev->ndev,
2770 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002771 return;
2772 }
2773 if (sbq_desc->p.skb) {
2774 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002775 dma_unmap_addr(sbq_desc, mapaddr),
2776 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002777 PCI_DMA_FROMDEVICE);
2778 dev_kfree_skb(sbq_desc->p.skb);
2779 sbq_desc->p.skb = NULL;
2780 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002781 }
2782}
2783
Ron Mercer4545a3f2009-02-23 10:42:17 +00002784/* Free all large and small rx buffers associated
2785 * with the completion queues for this device.
2786 */
2787static void ql_free_rx_buffers(struct ql_adapter *qdev)
2788{
2789 int i;
2790 struct rx_ring *rx_ring;
2791
2792 for (i = 0; i < qdev->rx_ring_count; i++) {
2793 rx_ring = &qdev->rx_ring[i];
2794 if (rx_ring->lbq)
2795 ql_free_lbq_buffers(qdev, rx_ring);
2796 if (rx_ring->sbq)
2797 ql_free_sbq_buffers(qdev, rx_ring);
2798 }
2799}
2800
2801static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2802{
2803 struct rx_ring *rx_ring;
2804 int i;
2805
2806 for (i = 0; i < qdev->rx_ring_count; i++) {
2807 rx_ring = &qdev->rx_ring[i];
2808 if (rx_ring->type != TX_Q)
2809 ql_update_buffer_queues(qdev, rx_ring);
2810 }
2811}
2812
2813static void ql_init_lbq_ring(struct ql_adapter *qdev,
2814 struct rx_ring *rx_ring)
2815{
2816 int i;
2817 struct bq_desc *lbq_desc;
2818 __le64 *bq = rx_ring->lbq_base;
2819
2820 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2821 for (i = 0; i < rx_ring->lbq_len; i++) {
2822 lbq_desc = &rx_ring->lbq[i];
2823 memset(lbq_desc, 0, sizeof(*lbq_desc));
2824 lbq_desc->index = i;
2825 lbq_desc->addr = bq;
2826 bq++;
2827 }
2828}
2829
2830static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002831 struct rx_ring *rx_ring)
2832{
2833 int i;
2834 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002835 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002836
Ron Mercer4545a3f2009-02-23 10:42:17 +00002837 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002838 for (i = 0; i < rx_ring->sbq_len; i++) {
2839 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002840 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002841 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002842 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002843 bq++;
2844 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002845}
2846
2847static void ql_free_rx_resources(struct ql_adapter *qdev,
2848 struct rx_ring *rx_ring)
2849{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002850 /* Free the small buffer queue. */
2851 if (rx_ring->sbq_base) {
2852 pci_free_consistent(qdev->pdev,
2853 rx_ring->sbq_size,
2854 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2855 rx_ring->sbq_base = NULL;
2856 }
2857
2858 /* Free the small buffer queue control blocks. */
2859 kfree(rx_ring->sbq);
2860 rx_ring->sbq = NULL;
2861
2862 /* Free the large buffer queue. */
2863 if (rx_ring->lbq_base) {
2864 pci_free_consistent(qdev->pdev,
2865 rx_ring->lbq_size,
2866 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2867 rx_ring->lbq_base = NULL;
2868 }
2869
2870 /* Free the large buffer queue control blocks. */
2871 kfree(rx_ring->lbq);
2872 rx_ring->lbq = NULL;
2873
2874 /* Free the rx queue. */
2875 if (rx_ring->cq_base) {
2876 pci_free_consistent(qdev->pdev,
2877 rx_ring->cq_size,
2878 rx_ring->cq_base, rx_ring->cq_base_dma);
2879 rx_ring->cq_base = NULL;
2880 }
2881}
2882
2883/* Allocate queues and buffers for this completion queue based
2884 * on the values in the parameter structure. */
2885static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2886 struct rx_ring *rx_ring)
2887{
2888
2889 /*
2890 * Allocate the completion queue for this rx_ring.
2891 */
2892 rx_ring->cq_base =
2893 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2894 &rx_ring->cq_base_dma);
2895
2896 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002897 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002898 return -ENOMEM;
2899 }
2900
2901 if (rx_ring->sbq_len) {
2902 /*
2903 * Allocate small buffer queue.
2904 */
2905 rx_ring->sbq_base =
2906 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2907 &rx_ring->sbq_base_dma);
2908
2909 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002910 netif_err(qdev, ifup, qdev->ndev,
2911 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002912 goto err_mem;
2913 }
2914
2915 /*
2916 * Allocate small buffer queue control blocks.
2917 */
2918 rx_ring->sbq =
2919 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2920 GFP_KERNEL);
2921 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002922 netif_err(qdev, ifup, qdev->ndev,
2923 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002924 goto err_mem;
2925 }
2926
Ron Mercer4545a3f2009-02-23 10:42:17 +00002927 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002928 }
2929
2930 if (rx_ring->lbq_len) {
2931 /*
2932 * Allocate large buffer queue.
2933 */
2934 rx_ring->lbq_base =
2935 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2936 &rx_ring->lbq_base_dma);
2937
2938 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002939 netif_err(qdev, ifup, qdev->ndev,
2940 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002941 goto err_mem;
2942 }
2943 /*
2944 * Allocate large buffer queue control blocks.
2945 */
2946 rx_ring->lbq =
2947 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2948 GFP_KERNEL);
2949 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002950 netif_err(qdev, ifup, qdev->ndev,
2951 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002952 goto err_mem;
2953 }
2954
Ron Mercer4545a3f2009-02-23 10:42:17 +00002955 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002956 }
2957
2958 return 0;
2959
2960err_mem:
2961 ql_free_rx_resources(qdev, rx_ring);
2962 return -ENOMEM;
2963}
2964
2965static void ql_tx_ring_clean(struct ql_adapter *qdev)
2966{
2967 struct tx_ring *tx_ring;
2968 struct tx_ring_desc *tx_ring_desc;
2969 int i, j;
2970
2971 /*
2972	 * Loop through all the TX queues and free
2973	 * any SKBs that are still outstanding.
2974 */
2975 for (j = 0; j < qdev->tx_ring_count; j++) {
2976 tx_ring = &qdev->tx_ring[j];
2977 for (i = 0; i < tx_ring->wq_len; i++) {
2978 tx_ring_desc = &tx_ring->q[i];
2979 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002980 netif_err(qdev, ifdown, qdev->ndev,
2981 "Freeing lost SKB %p, from queue %d, index %d.\n",
2982 tx_ring_desc->skb, j,
2983 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002984 ql_unmap_send(qdev, tx_ring_desc,
2985 tx_ring_desc->map_cnt);
2986 dev_kfree_skb(tx_ring_desc->skb);
2987 tx_ring_desc->skb = NULL;
2988 }
2989 }
2990 }
2991}
2992
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002993static void ql_free_mem_resources(struct ql_adapter *qdev)
2994{
2995 int i;
2996
2997 for (i = 0; i < qdev->tx_ring_count; i++)
2998 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2999 for (i = 0; i < qdev->rx_ring_count; i++)
3000 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3001 ql_free_shadow_space(qdev);
3002}
3003
3004static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3005{
3006 int i;
3007
3008 /* Allocate space for our shadow registers and such. */
3009 if (ql_alloc_shadow_space(qdev))
3010 return -ENOMEM;
3011
3012 for (i = 0; i < qdev->rx_ring_count; i++) {
3013 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003014 netif_err(qdev, ifup, qdev->ndev,
3015 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003016 goto err_mem;
3017 }
3018 }
3019 /* Allocate tx queue resources */
3020 for (i = 0; i < qdev->tx_ring_count; i++) {
3021 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003022 netif_err(qdev, ifup, qdev->ndev,
3023 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003024 goto err_mem;
3025 }
3026 }
3027 return 0;
3028
3029err_mem:
3030 ql_free_mem_resources(qdev);
3031 return -ENOMEM;
3032}
3033
3034/* Set up the rx ring control block and pass it to the chip.
3035 * The control block is defined as
3036 * "Completion Queue Initialization Control Block", or cqicb.
3037 */
3038static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3039{
3040 struct cqicb *cqicb = &rx_ring->cqicb;
3041 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003042 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003043 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003044 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003045 void __iomem *doorbell_area =
3046 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3047 int err = 0;
3048 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003049 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003050 __le64 *base_indirect_ptr;
3051 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003052
3053 /* Set up the shadow registers for this ring. */
3054 rx_ring->prod_idx_sh_reg = shadow_reg;
3055 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003056 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003057 shadow_reg += sizeof(u64);
3058 shadow_reg_dma += sizeof(u64);
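	/* The remainder of this ring's shadow area holds the lbq and
	 * sbq page-indirection tables that are filled in below.
	 */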
3059 rx_ring->lbq_base_indirect = shadow_reg;
3060 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003061 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3062 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003063 rx_ring->sbq_base_indirect = shadow_reg;
3064 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3065
3066 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003067 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003068 rx_ring->cnsmr_idx = 0;
3069 rx_ring->curr_entry = rx_ring->cq_base;
3070
3071 /* PCI doorbell mem area + 0x04 for valid register */
3072 rx_ring->valid_db_reg = doorbell_area + 0x04;
3073
3074 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003075 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076
3077 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003078 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003079
3080 memset((void *)cqicb, 0, sizeof(struct cqicb));
3081 cqicb->msix_vect = rx_ring->irq;
3082
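	/* A queue length of 65536 does not fit in the 16-bit field;
	 * zero is written instead to denote the maximum size.
	 */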
Ron Mercer459caf52009-01-04 17:08:11 -08003083 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3084 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003085
Ron Mercer97345522009-01-09 11:31:50 +00003086 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003087
Ron Mercer97345522009-01-09 11:31:50 +00003088 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003089
3090 /*
3091 * Set up the control block load flags.
3092 */
3093 cqicb->flags = FLAGS_LC | /* Load queue base address */
3094 FLAGS_LV | /* Load MSI-X vector */
3095 FLAGS_LI; /* Load irq delay values */
3096 if (rx_ring->lbq_len) {
3097 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003098 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003099 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3100 page_entries = 0;
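		/* Fill the indirection table in shadow memory: one entry
		 * per doorbell page, each holding the DMA address of the
		 * next DB_PAGE_SIZE chunk of the large buffer queue.
		 */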
3101 do {
3102 *base_indirect_ptr = cpu_to_le64(tmp);
3103 tmp += DB_PAGE_SIZE;
3104 base_indirect_ptr++;
3105 page_entries++;
3106 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003107 cqicb->lbq_addr =
3108 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003109 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3110 (u16) rx_ring->lbq_buf_size;
3111 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3112 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3113 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003114 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003115 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003116 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003117 rx_ring->lbq_clean_idx = 0;
3118 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003119 }
3120 if (rx_ring->sbq_len) {
3121 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003122 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003123 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3124 page_entries = 0;
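		/* Build the matching page-indirection table for the
		 * small buffer queue.
		 */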
3125 do {
3126 *base_indirect_ptr = cpu_to_le64(tmp);
3127 tmp += DB_PAGE_SIZE;
3128 base_indirect_ptr++;
3129 page_entries++;
3130 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003131 cqicb->sbq_addr =
3132 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003133 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003134 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003135 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3136 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003137 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003138 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003139 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003140 rx_ring->sbq_clean_idx = 0;
3141 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142 }
3143 switch (rx_ring->type) {
3144 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003145 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3146 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3147 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003148 case RX_Q:
3149 /* Inbound completion handling rx_rings run in
3150 * separate NAPI contexts.
3151 */
3152 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3153 64);
3154 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3155 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3156 break;
3157 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003158 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3159 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003160 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003161 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3162 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003163 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3164 CFG_LCQ, rx_ring->cq_id);
3165 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003166 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003167 return err;
3168 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003169 return err;
3170}
3171
3172static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3173{
3174 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3175 void __iomem *doorbell_area =
3176 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3177 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3178 (tx_ring->wq_id * sizeof(u64));
3179 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3180 (tx_ring->wq_id * sizeof(u64));
3181 int err = 0;
3182
3183 /*
3184 * Assign doorbell registers for this tx_ring.
3185 */
3186 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003187 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188 tx_ring->prod_idx = 0;
3189 /* TX PCI doorbell mem area + 0x04 */
3190 tx_ring->valid_db_reg = doorbell_area + 0x04;
3191
3192 /*
3193 * Assign shadow registers for this tx_ring.
3194 */
3195 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3196 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3197
3198 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3199 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3200 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3201 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3202 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003203 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003204
Ron Mercer97345522009-01-09 11:31:50 +00003205 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003206
3207 ql_init_tx_ring(qdev, tx_ring);
3208
Ron Mercere3324712009-07-02 06:06:13 +00003209 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003210 (u16) tx_ring->wq_id);
3211 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003212 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003213 return err;
3214 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003215 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3216 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003217 return err;
3218}
3219
3220static void ql_disable_msix(struct ql_adapter *qdev)
3221{
3222 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3223 pci_disable_msix(qdev->pdev);
3224 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3225 kfree(qdev->msi_x_entry);
3226 qdev->msi_x_entry = NULL;
3227 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3228 pci_disable_msi(qdev->pdev);
3229 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3230 }
3231}
3232
Ron Mercera4ab6132009-08-27 11:02:10 +00003233/* We start by trying to get the number of vectors
3234 * stored in qdev->intr_count. If we don't get that
3235 * many then we reduce the count and try again.
3236 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003237static void ql_enable_msix(struct ql_adapter *qdev)
3238{
Ron Mercera4ab6132009-08-27 11:02:10 +00003239 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003240
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003241 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003242 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003243 /* Try to alloc space for the msix struct,
3244 * if it fails then go to MSI/legacy.
3245 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003246 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003247 sizeof(struct msix_entry),
3248 GFP_KERNEL);
3249 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003250 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003251 goto msi;
3252 }
3253
Ron Mercera4ab6132009-08-27 11:02:10 +00003254 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003255 qdev->msi_x_entry[i].entry = i;
3256
Ron Mercera4ab6132009-08-27 11:02:10 +00003257 /* Loop to get our vectors. We start with
3258 * what we want and settle for what we get.
3259 */
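		/* A positive return from pci_enable_msix() is the number
		 * of vectors the system can actually provide, so retry
		 * with that smaller count.
		 */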
3260 do {
3261 err = pci_enable_msix(qdev->pdev,
3262 qdev->msi_x_entry, qdev->intr_count);
3263 if (err > 0)
3264 qdev->intr_count = err;
3265 } while (err > 0);
3266
3267 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003268 kfree(qdev->msi_x_entry);
3269 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003270 netif_warn(qdev, ifup, qdev->ndev,
3271 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003272 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003273 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003274 } else if (err == 0) {
3275 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003276 netif_info(qdev, ifup, qdev->ndev,
3277 "MSI-X Enabled, got %d vectors.\n",
3278 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003279 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003280 }
3281 }
3282msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003283 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003284 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003285 if (!pci_enable_msi(qdev->pdev)) {
3286 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003287 netif_info(qdev, ifup, qdev->ndev,
3288 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003289 return;
3290 }
3291 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003292 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003293 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3294 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003295}
3296
Ron Mercer39aa8162009-08-27 11:02:11 +00003297/* Each vector services 1 RSS ring and 1 or more
3298 * TX completion rings. This function loops through
3299 * the TX completion rings and assigns the vector that
3300 * will service it. An example would be if there are
3301 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3302 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003303 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003304 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3305 */
3306static void ql_set_tx_vect(struct ql_adapter *qdev)
3307{
3308 int i, j, vect;
3309 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3310
3311 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3312 /* Assign irq vectors to TX rx_rings.*/
3313 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3314 i < qdev->rx_ring_count; i++) {
3315 if (j == tx_rings_per_vector) {
3316 vect++;
3317 j = 0;
3318 }
3319 qdev->rx_ring[i].irq = vect;
3320 j++;
3321 }
3322 } else {
3323 /* For single vector all rings have an irq
3324 * of zero.
3325 */
3326 for (i = 0; i < qdev->rx_ring_count; i++)
3327 qdev->rx_ring[i].irq = 0;
3328 }
3329}
3330
3331/* Set the interrupt mask for this vector. Each vector
3332 * will service 1 RSS ring and 1 or more TX completion
3333 * rings. This function sets up a bit mask per vector
3334 * that indicates which rings it services.
3335 */
3336static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3337{
3338 int j, vect = ctx->intr;
3339 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3340
3341 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3342 /* Add the RSS ring serviced by this vector
3343 * to the mask.
3344 */
3345 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3346 /* Add the TX ring(s) serviced by this vector
3347 * to the mask. */
3348 for (j = 0; j < tx_rings_per_vector; j++) {
3349 ctx->irq_mask |=
3350 (1 << qdev->rx_ring[qdev->rss_ring_count +
3351 (vect * tx_rings_per_vector) + j].cq_id);
3352 }
3353 } else {
3354 /* For single vector we just shift each queue's
3355 * ID into the mask.
3356 */
3357 for (j = 0; j < qdev->rx_ring_count; j++)
3358 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3359 }
3360}
3361
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003362/*
3363 * Here we build the intr_context structures based on
3364 * our rx_ring count and intr vector count.
3365 * The intr_context structure is used to hook each vector
3366 * to possibly different handlers.
3367 */
3368static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3369{
3370 int i = 0;
3371 struct intr_context *intr_context = &qdev->intr_context[0];
3372
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003373 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3374		/* Each rx_ring has its
3375 * own intr_context since we have separate
3376 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003377 */
3378 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3379 qdev->rx_ring[i].irq = i;
3380 intr_context->intr = i;
3381 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003382 /* Set up this vector's bit-mask that indicates
3383 * which queues it services.
3384 */
3385 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003386 /*
3387			 * We set up each vector's enable/disable/read bits so
3388			 * there are no bit/mask calculations in the critical path.
3389 */
3390 intr_context->intr_en_mask =
3391 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3392 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3393 | i;
3394 intr_context->intr_dis_mask =
3395 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3396 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3397 INTR_EN_IHD | i;
3398 intr_context->intr_read_mask =
3399 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3400 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3401 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003402 if (i == 0) {
3403 /* The first vector/queue handles
3404 * broadcast/multicast, fatal errors,
3405				 * and firmware events. This is in addition
3406 * to normal inbound NAPI processing.
3407 */
3408 intr_context->handler = qlge_isr;
3409 sprintf(intr_context->name, "%s-rx-%d",
3410 qdev->ndev->name, i);
3411 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003412 /*
3413 * Inbound queues handle unicast frames only.
3414 */
3415 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003416 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003417 qdev->ndev->name, i);
3418 }
3419 }
3420 } else {
3421 /*
3422 * All rx_rings use the same intr_context since
3423 * there is only one vector.
3424 */
3425 intr_context->intr = 0;
3426 intr_context->qdev = qdev;
3427 /*
3428		 * We set up each vector's enable/disable/read bits so
3429		 * there are no bit/mask calculations in the critical path.
3430 */
3431 intr_context->intr_en_mask =
3432 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3433 intr_context->intr_dis_mask =
3434 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3435 INTR_EN_TYPE_DISABLE;
3436 intr_context->intr_read_mask =
3437 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3438 /*
3439 * Single interrupt means one handler for all rings.
3440 */
3441 intr_context->handler = qlge_isr;
3442 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003443 /* Set up this vector's bit-mask that indicates
3444 * which queues it services. In this case there is
3445 * a single vector so it will service all RSS and
3446 * TX completion rings.
3447 */
3448 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003449 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003450 /* Tell the TX completion rings which MSIx vector
3451 * they will be using.
3452 */
3453 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003454}
3455
3456static void ql_free_irq(struct ql_adapter *qdev)
3457{
3458 int i;
3459 struct intr_context *intr_context = &qdev->intr_context[0];
3460
3461 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3462 if (intr_context->hooked) {
3463 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3464 free_irq(qdev->msi_x_entry[i].vector,
3465 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003466 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3467 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003468 } else {
3469 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003470 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3471 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003472 }
3473 }
3474 }
3475 ql_disable_msix(qdev);
3476}
3477
3478static int ql_request_irq(struct ql_adapter *qdev)
3479{
3480 int i;
3481 int status = 0;
3482 struct pci_dev *pdev = qdev->pdev;
3483 struct intr_context *intr_context = &qdev->intr_context[0];
3484
3485 ql_resolve_queues_to_irqs(qdev);
3486
3487 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3488 atomic_set(&intr_context->irq_cnt, 0);
3489 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3490 status = request_irq(qdev->msi_x_entry[i].vector,
3491 intr_context->handler,
3492 0,
3493 intr_context->name,
3494 &qdev->rx_ring[i]);
3495 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003496 netif_err(qdev, ifup, qdev->ndev,
3497 "Failed request for MSIX interrupt %d.\n",
3498 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003499 goto err_irq;
3500 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003501 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3502 "Hooked intr %d, queue type %s, with name %s.\n",
3503 i,
3504 qdev->rx_ring[i].type == DEFAULT_Q ?
3505 "DEFAULT_Q" :
3506 qdev->rx_ring[i].type == TX_Q ?
3507 "TX_Q" :
3508 qdev->rx_ring[i].type == RX_Q ?
3509 "RX_Q" : "",
3510 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003511 }
3512 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003513 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3514 "trying msi or legacy interrupts.\n");
3515 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3516 "%s: irq = %d.\n", __func__, pdev->irq);
3517 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3518 "%s: context->name = %s.\n", __func__,
3519 intr_context->name);
3520 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3521 "%s: dev_id = 0x%p.\n", __func__,
3522 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003523 status =
3524 request_irq(pdev->irq, qlge_isr,
3525 test_bit(QL_MSI_ENABLED,
3526 &qdev->
3527 flags) ? 0 : IRQF_SHARED,
3528 intr_context->name, &qdev->rx_ring[0]);
3529 if (status)
3530 goto err_irq;
3531
Joe Perchesae9540f72010-02-09 11:49:52 +00003532 netif_err(qdev, ifup, qdev->ndev,
3533 "Hooked intr %d, queue type %s, with name %s.\n",
3534 i,
3535 qdev->rx_ring[0].type == DEFAULT_Q ?
3536 "DEFAULT_Q" :
3537 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3538 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3539 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003540 }
3541 intr_context->hooked = 1;
3542 }
3543 return status;
3544err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003545	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003546 ql_free_irq(qdev);
3547 return status;
3548}
3549
3550static int ql_start_rss(struct ql_adapter *qdev)
3551{
Joe Perches215faf92010-12-21 02:16:10 -08003552 static const u8 init_hash_seed[] = {
3553 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3554 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3555 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3556 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3557 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3558 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559 struct ricb *ricb = &qdev->ricb;
3560 int status = 0;
3561 int i;
3562 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3563
Ron Mercere3324712009-07-02 06:06:13 +00003564 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565
Ron Mercerb2014ff2009-08-27 11:02:09 +00003566 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003567 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003568 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3569 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003570
3571 /*
3572 * Fill out the Indirection Table.
3573 */
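	/* Each of the 1024 entries maps a hash bucket to a completion
	 * queue id; masking with (ring count - 1) gives an even spread
	 * when the RSS ring count is a power of two.
	 */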
Ron Mercer541ae282009-10-08 09:54:37 +00003574 for (i = 0; i < 1024; i++)
3575 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003576
Ron Mercer541ae282009-10-08 09:54:37 +00003577 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3578 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003579
Joe Perchesae9540f72010-02-09 11:49:52 +00003580 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003581
Ron Mercere3324712009-07-02 06:06:13 +00003582 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003583 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003584 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003585 return status;
3586 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003587 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3588 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003589 return status;
3590}
3591
Ron Mercera5f59dc2009-07-02 06:06:07 +00003592static int ql_clear_routing_entries(struct ql_adapter *qdev)
3593{
3594 int i, status = 0;
3595
3596 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597 if (status)
3598 return status;
3599 /* Clear all the entries in the routing table. */
3600 for (i = 0; i < 16; i++) {
3601 status = ql_set_routing_reg(qdev, i, 0, 0);
3602 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003603 netif_err(qdev, ifup, qdev->ndev,
3604 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003605 break;
3606 }
3607 }
3608 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3609 return status;
3610}
3611
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003612/* Initialize the frame-to-queue routing. */
3613static int ql_route_initialize(struct ql_adapter *qdev)
3614{
3615 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003616
3617 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003618 status = ql_clear_routing_entries(qdev);
3619 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003620 return status;
3621
3622 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3623 if (status)
3624 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003625
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003626 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3627 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003628 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003629 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003630 "Failed to init routing register "
3631 "for IP CSUM error packets.\n");
3632 goto exit;
3633 }
3634 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3635 RT_IDX_TU_CSUM_ERR, 1);
3636 if (status) {
3637 netif_err(qdev, ifup, qdev->ndev,
3638 "Failed to init routing register "
3639 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003640 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003641 }
3642 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3643 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003644 netif_err(qdev, ifup, qdev->ndev,
3645 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003646 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003647 }
3648 /* If we have more than one inbound queue, then turn on RSS in the
3649 * routing block.
3650 */
3651 if (qdev->rss_ring_count > 1) {
3652 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3653 RT_IDX_RSS_MATCH, 1);
3654 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003655 netif_err(qdev, ifup, qdev->ndev,
3656 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003657 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003658 }
3659 }
3660
3661 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3662 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003663 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003664 netif_err(qdev, ifup, qdev->ndev,
3665 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003666exit:
3667 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003668 return status;
3669}
3670
Ron Mercer2ee1e272009-03-03 12:10:33 +00003671int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003672{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003673 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003674
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003675	/* Check if the link is up and use that to
3676	 * determine whether we are setting or clearing
3677 * the MAC address in the CAM.
3678 */
3679 set = ql_read32(qdev, STS);
3680 set &= qdev->port_link_up;
3681 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003682 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003683 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003684 return status;
3685 }
3686
3687 status = ql_route_initialize(qdev);
3688 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003689 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003690
3691 return status;
3692}
3693
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003694static int ql_adapter_initialize(struct ql_adapter *qdev)
3695{
3696 u32 value, mask;
3697 int i;
3698 int status = 0;
3699
3700 /*
3701 * Set up the System register to halt on errors.
3702 */
3703 value = SYS_EFE | SYS_FAE;
3704 mask = value << 16;
3705 ql_write32(qdev, SYS, mask | value);
3706
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003707 /* Set the default queue, and VLAN behavior. */
3708 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3709 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003710 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3711
3712 /* Set the MPI interrupt to enabled. */
3713 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3714
3715 /* Enable the function, set pagesize, enable error checking. */
3716 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003717 FSC_EC | FSC_VM_PAGE_4K;
3718 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003719
3720 /* Set/clear header splitting. */
3721 mask = FSC_VM_PAGESIZE_MASK |
3722 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3723 ql_write32(qdev, FSC, mask | value);
3724
Ron Mercer572c5262010-01-02 10:37:42 +00003725 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003726
Ron Mercera3b71932009-10-08 09:54:38 +00003727 /* Set RX packet routing to use port/pci function on which the
3728	 * packet arrived, in addition to the usual frame routing.
3729 * This is helpful on bonding where both interfaces can have
3730 * the same MAC address.
3731 */
3732 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003733 /* Reroute all packets to our Interface.
3734 * They may have been routed to MPI firmware
3735 * due to WOL.
3736 */
3737 value = ql_read32(qdev, MGMT_RCV_CFG);
3738 value &= ~MGMT_RCV_CFG_RM;
3739 mask = 0xffff0000;
3740
3741 /* Sticky reg needs clearing due to WOL. */
3742 ql_write32(qdev, MGMT_RCV_CFG, mask);
3743 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3744
3745	/* Default WOL is enabled on Mezz cards */
3746 if (qdev->pdev->subsystem_device == 0x0068 ||
3747 qdev->pdev->subsystem_device == 0x0180)
3748 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003749
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003750 /* Start up the rx queues. */
3751 for (i = 0; i < qdev->rx_ring_count; i++) {
3752 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3753 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003754 netif_err(qdev, ifup, qdev->ndev,
3755 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003756 return status;
3757 }
3758 }
3759
3760 /* If there is more than one inbound completion queue
3761 * then download a RICB to configure RSS.
3762 */
3763 if (qdev->rss_ring_count > 1) {
3764 status = ql_start_rss(qdev);
3765 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003766 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003767 return status;
3768 }
3769 }
3770
3771 /* Start up the tx queues. */
3772 for (i = 0; i < qdev->tx_ring_count; i++) {
3773 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3774 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003775 netif_err(qdev, ifup, qdev->ndev,
3776 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003777 return status;
3778 }
3779 }
3780
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003781 /* Initialize the port and set the max framesize. */
3782 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003783 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003784 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003785
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003786 /* Set up the MAC address and frame routing filter. */
3787 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003788 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003789 netif_err(qdev, ifup, qdev->ndev,
3790 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003791 return status;
3792 }
3793
3794 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003795 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003796 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3797 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003798 napi_enable(&qdev->rx_ring[i].napi);
3799 }
3800
3801 return status;
3802}
3803
3804/* Issue soft reset to chip. */
3805static int ql_adapter_reset(struct ql_adapter *qdev)
3806{
3807 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003809 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003810
Ron Mercera5f59dc2009-07-02 06:06:07 +00003811 /* Clear all the entries in the routing table. */
3812 status = ql_clear_routing_entries(qdev);
3813 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003814 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003815 return status;
3816 }
3817
3818 end_jiffies = jiffies +
3819 max((unsigned long)1, usecs_to_jiffies(30));
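	/* Budget roughly 30 usecs (at least one jiffy) for the
	 * function reset issued below to clear.
	 */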
Ron Mercer84087f42009-10-08 09:54:41 +00003820
3821 /* Stop management traffic. */
3822 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3823
3824 /* Wait for the NIC and MGMNT FIFOs to empty. */
3825 ql_wait_fifo_empty(qdev);
3826
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003827 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003828
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003829 do {
3830 value = ql_read32(qdev, RST_FO);
3831 if ((value & RST_FO_FR) == 0)
3832 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003833 cpu_relax();
3834 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003835
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003836 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003837 netif_err(qdev, ifdown, qdev->ndev,
3838 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003839 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003840 }
3841
Ron Mercer84087f42009-10-08 09:54:41 +00003842 /* Resume management traffic. */
3843 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003844 return status;
3845}
3846
3847static void ql_display_dev_info(struct net_device *ndev)
3848{
Joe Perchesb16fed02010-11-15 11:12:28 +00003849 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003850
Joe Perchesae9540f72010-02-09 11:49:52 +00003851 netif_info(qdev, probe, qdev->ndev,
3852 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3853 "XG Roll = %d, XG Rev = %d.\n",
3854 qdev->func,
3855 qdev->port,
3856 qdev->chip_rev_id & 0x0000000f,
3857 qdev->chip_rev_id >> 4 & 0x0000000f,
3858 qdev->chip_rev_id >> 8 & 0x0000000f,
3859 qdev->chip_rev_id >> 12 & 0x0000000f);
3860 netif_info(qdev, probe, qdev->ndev,
3861 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003862}
3863
stephen hemmingerac409212010-10-21 07:50:54 +00003864static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003865{
3866 int status = 0;
3867 u32 wol = MB_WOL_DISABLE;
3868
3869 /* The CAM is still intact after a reset, but if we
3870 * are doing WOL, then we may need to program the
3871 * routing regs. We would also need to issue the mailbox
3872 * commands to instruct the MPI what to do per the ethtool
3873 * settings.
3874 */
3875
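	/* Only magic-packet wake is supported; reject any other
	 * requested wake type.
	 */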
3876 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3877 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003878 netif_err(qdev, ifdown, qdev->ndev,
3879 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3880 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003881 return -EINVAL;
3882 }
3883
3884 if (qdev->wol & WAKE_MAGIC) {
3885 status = ql_mb_wol_set_magic(qdev, 1);
3886 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003887 netif_err(qdev, ifdown, qdev->ndev,
3888 "Failed to set magic packet on %s.\n",
3889 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003890 return status;
3891 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003892 netif_info(qdev, drv, qdev->ndev,
3893 "Enabled magic packet successfully on %s.\n",
3894 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003895
3896 wol |= MB_WOL_MAGIC_PKT;
3897 }
3898
3899 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003900 wol |= MB_WOL_MODE_ON;
3901 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003902 netif_err(qdev, drv, qdev->ndev,
3903 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003904 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003905 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003906 }
3907
3908 return status;
3909}
3910
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003911static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003912{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003913
Ron Mercer6497b602009-02-12 16:37:13 -08003914 /* Don't kill the reset worker thread if we
3915 * are in the process of recovery.
3916 */
3917 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3918 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003919 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3920 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003921 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003922 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003923 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003924}
3925
3926static int ql_adapter_down(struct ql_adapter *qdev)
3927{
3928 int i, status = 0;
3929
3930 ql_link_off(qdev);
3931
3932 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003933
Ron Mercer39aa8162009-08-27 11:02:11 +00003934 for (i = 0; i < qdev->rss_ring_count; i++)
3935 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003936
3937 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3938
3939 ql_disable_interrupts(qdev);
3940
3941 ql_tx_ring_clean(qdev);
3942
Ron Mercer6b318cb2009-03-09 10:59:26 +00003943	/* Call netif_napi_del() from a common point.
3944 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003945 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003946 netif_napi_del(&qdev->rx_ring[i].napi);
3947
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003948 status = ql_adapter_reset(qdev);
3949 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003950 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3951 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003952 ql_free_rx_buffers(qdev);
3953
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954 return status;
3955}
3956
3957static int ql_adapter_up(struct ql_adapter *qdev)
3958{
3959 int err = 0;
3960
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003961 err = ql_adapter_initialize(qdev);
3962 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003963 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003964 goto err_init;
3965 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003966 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003967 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003968 /* If the port is initialized and the
3969	 * link is up then turn on the carrier.
3970 */
3971 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3972 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003973 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003974 /* Restore rx mode. */
3975 clear_bit(QL_ALLMULTI, &qdev->flags);
3976 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3977 qlge_set_multicast_list(qdev->ndev);
3978
Ron Mercerc1b60092010-10-27 04:58:12 +00003979 /* Restore vlan setting. */
3980 qlge_restore_vlan(qdev);
3981
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003982 ql_enable_interrupts(qdev);
3983 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003984 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003985
3986 return 0;
3987err_init:
3988 ql_adapter_reset(qdev);
3989 return err;
3990}
3991
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003992static void ql_release_adapter_resources(struct ql_adapter *qdev)
3993{
3994 ql_free_mem_resources(qdev);
3995 ql_free_irq(qdev);
3996}
3997
3998static int ql_get_adapter_resources(struct ql_adapter *qdev)
3999{
4000 int status = 0;
4001
4002 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004003 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004004 return -ENOMEM;
4005 }
4006 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004007 return status;
4008}
4009
4010static int qlge_close(struct net_device *ndev)
4011{
4012 struct ql_adapter *qdev = netdev_priv(ndev);
4013
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004014	/* If we hit the pci_channel_io_perm_failure
4015	 * condition, then we have already
4016 * brought the adapter down.
4017 */
4018 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004019 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004020 clear_bit(QL_EEH_FATAL, &qdev->flags);
4021 return 0;
4022 }
4023
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004024 /*
4025 * Wait for device to recover from a reset.
4026 * (Rarely happens, but possible.)
4027 */
4028 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4029 msleep(1);
4030 ql_adapter_down(qdev);
4031 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004032 return 0;
4033}
4034
4035static int ql_configure_rings(struct ql_adapter *qdev)
4036{
4037 int i;
4038 struct rx_ring *rx_ring;
4039 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004040 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004041 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4042 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4043
4044 qdev->lbq_buf_order = get_order(lbq_buf_len);
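	/* Jumbo frames use the larger receive buffer; lbq_buf_order is
	 * the page-allocation order that backs buffers of that size.
	 */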
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004045
Ron Mercera4ab6132009-08-27 11:02:10 +00004046 /* In a perfect world we have one RSS ring for each CPU
4047	 * and each has its own vector. To do that we ask for
4048 * cpu_cnt vectors. ql_enable_msix() will adjust the
4049 * vector count to what we actually get. We then
4050 * allocate an RSS ring for each.
4051 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004052 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004053 qdev->intr_count = cpu_cnt;
4054 ql_enable_msix(qdev);
4055 /* Adjust the RSS ring count to the actual vector count. */
4056 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004057 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004058 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004059
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004060 for (i = 0; i < qdev->tx_ring_count; i++) {
4061 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004062 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004063 tx_ring->qdev = qdev;
4064 tx_ring->wq_id = i;
4065 tx_ring->wq_len = qdev->tx_ring_size;
4066 tx_ring->wq_size =
4067 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4068
4069 /*
4070		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004071 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004072 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004073 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004074 }
4075
4076 for (i = 0; i < qdev->rx_ring_count; i++) {
4077 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004078 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004079 rx_ring->qdev = qdev;
4080 rx_ring->cq_id = i;
4081 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004082 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004083 /*
4084 * Inbound (RSS) queues.
4085 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004086 rx_ring->cq_len = qdev->rx_ring_size;
4087 rx_ring->cq_size =
4088 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4089 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4090 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004091 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004092 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004093 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4094 "lbq_buf_size %d, order = %d\n",
4095 rx_ring->lbq_buf_size,
4096 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004097 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4098 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004099 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004100 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004101 rx_ring->type = RX_Q;
4102 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004103 /*
4104 * Outbound queue handles outbound completions only.
4105 */
4106 /* outbound cq is same size as tx_ring it services. */
4107 rx_ring->cq_len = qdev->tx_ring_size;
4108 rx_ring->cq_size =
4109 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4110 rx_ring->lbq_len = 0;
4111 rx_ring->lbq_size = 0;
4112 rx_ring->lbq_buf_size = 0;
4113 rx_ring->sbq_len = 0;
4114 rx_ring->sbq_size = 0;
4115 rx_ring->sbq_buf_size = 0;
4116 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004117 }
4118 }
4119 return 0;
4120}
4121
4122static int qlge_open(struct net_device *ndev)
4123{
4124 int err = 0;
4125 struct ql_adapter *qdev = netdev_priv(ndev);
4126
Ron Mercer74e12432009-11-11 12:54:04 +00004127 err = ql_adapter_reset(qdev);
4128 if (err)
4129 return err;
4130
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004131 err = ql_configure_rings(qdev);
4132 if (err)
4133 return err;
4134
4135 err = ql_get_adapter_resources(qdev);
4136 if (err)
4137 goto error_up;
4138
4139 err = ql_adapter_up(qdev);
4140 if (err)
4141 goto error_up;
4142
4143 return err;
4144
4145error_up:
4146 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004147 return err;
4148}
4149
Ron Mercer7c734352009-10-19 03:32:19 +00004150static int ql_change_rx_buffers(struct ql_adapter *qdev)
4151{
4152 struct rx_ring *rx_ring;
4153 int i, status;
4154 u32 lbq_buf_len;
4155
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004156 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004157 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4158 int i = 3;
4159 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004160 netif_err(qdev, ifup, qdev->ndev,
4161 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004162 ssleep(1);
4163 }
4164
4165 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004166 netif_err(qdev, ifup, qdev->ndev,
4167 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004168 return -ETIMEDOUT;
4169 }
4170 }
4171
4172 status = ql_adapter_down(qdev);
4173 if (status)
4174 goto error;
4175
4176 /* Get the new rx buffer size. */
4177 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4178 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4179 qdev->lbq_buf_order = get_order(lbq_buf_len);
4180
4181 for (i = 0; i < qdev->rss_ring_count; i++) {
4182 rx_ring = &qdev->rx_ring[i];
4183 /* Set the new size. */
4184 rx_ring->lbq_buf_size = lbq_buf_len;
4185 }
4186
4187 status = ql_adapter_up(qdev);
4188 if (status)
4189 goto error;
4190
4191 return status;
4192error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004193 netif_alert(qdev, ifup, qdev->ndev,
4194 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004195 set_bit(QL_ADAPTER_UP, &qdev->flags);
4196 dev_close(qdev->ndev);
4197 return status;
4198}
4199
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004200static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4201{
4202 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004203 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004204
4205 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004206 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004207 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004208 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004209 } else
4210 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004211
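	/* Let the MPI port config worker push the new frame size to
	 * the firmware.
	 */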
4212 queue_delayed_work(qdev->workqueue,
4213 &qdev->mpi_port_cfg_work, 3*HZ);
4214
Breno Leitao746079d2010-02-04 10:11:19 +00004215 ndev->mtu = new_mtu;
4216
Ron Mercer7c734352009-10-19 03:32:19 +00004217 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004218 return 0;
4219 }
4220
Ron Mercer7c734352009-10-19 03:32:19 +00004221 status = ql_change_rx_buffers(qdev);
4222 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004223 netif_err(qdev, ifup, qdev->ndev,
4224 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004225 }
4226
4227 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004228}
4229
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

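/* Receive filtering is driven by routing-index slots in the chip.  The
 * QL_PROMISCUOUS and QL_ALLMULTI flag bits cache the current hardware
 * state so a slot is only rewritten when the interface flags actually
 * transition.
 */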
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

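/* The station address is loaded into a CAM slot derived from the PCI
 * function number, so each of the chip's two NIC functions keeps its own
 * unicast entry.
 */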
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}

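/* Worker behind qlge_tx_timeout() and queued ASIC errors: cycle the
 * adapter down and back up under rtnl_lock, then replay the rx mode
 * since the chip's routing state is lost across the reset.
 */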
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the PCIe function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

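/* Read the STS register to learn which PCI function this instance is,
 * then derive the per-port constants: the lower-numbered NIC function is
 * port 0, and each port gets its own XGMAC semaphore mask, link/init
 * status bits and MPI mailbox addresses.
 */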
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

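/* One-time setup at probe: enable the PCI device, map BAR 1 (control
 * registers) and BAR 3 (doorbells), choose a 64- or 32-bit DMA mask and
 * initialize the delayed-work items used for resets and MPI firmware
 * events.
 */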
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

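/* Periodic heartbeat used for EEH detection: the read of STS touches the
 * device every five seconds, so a dead PCI channel is noticed even while
 * the interface is idle.  The timer deliberately does not re-arm once the
 * channel is reported offline.
 */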
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

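/* Thin wrappers exported for the ethtool loopback self-test, which needs
 * to inject a frame and drain a receive ring without going through the
 * normal stack entry points.
 */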
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

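/* Legacy PM entry points.  Suspend brings the adapter down, arms
 * wake-on-LAN via ql_wol() and lets the PCI core pick the low-power
 * D-state; qlge_shutdown() below reuses the same path.
 */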
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);