/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
        NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |    */
        NETIF_MSG_IFDOWN |
        NETIF_MSG_IFUP |
        NETIF_MSG_RX_ERR |
        NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
        NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;  /* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
        "Option to enable MPI firmware dump. "
        "Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
        "Option to allow force of firmware core dump. "
        "Default is OFF - do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
        u32 sem_bits = 0;

        switch (sem_mask) {
        case SEM_XGMAC0_MASK:
                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
                break;
        case SEM_XGMAC1_MASK:
                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
                break;
        case SEM_ICB_MASK:
                sem_bits = SEM_SET << SEM_ICB_SHIFT;
                break;
        case SEM_MAC_ADDR_MASK:
                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
                break;
        case SEM_FLASH_MASK:
                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
                break;
        case SEM_PROBE_MASK:
                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
                break;
        case SEM_RT_IDX_MASK:
                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
                break;
        case SEM_PROC_REG_MASK:
                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
                break;
        default:
                netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
                return -EINVAL;
        }

        ql_write32(qdev, SEM, sem_bits | sem_mask);
        return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
        unsigned int wait_count = 30;
        do {
                if (!ql_sem_trylock(qdev, sem_mask))
                        return 0;
                udelay(100);
        } while (--wait_count);
        return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
        ql_write32(qdev, SEM, sem_mask);
        ql_read32(qdev, SEM);   /* flush */
}
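
/* Usage sketch: callers bracket the shared resource with the lock/unlock
 * pair, as ql_get_8012_flash_params() does below for the flash registers:
 *
 *      if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *              return -ETIMEDOUT;
 *      ...access FLASH_ADDR/FLASH_DATA...
 *      ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times with a 100us
 * delay between attempts, so a waiter busy-waits for roughly 3ms before
 * giving up with -ETIMEDOUT.
 */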

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly during initialization,
 * but is also used by net_device ops such as netdev->set_multi,
 * netdev->set_mac_address and netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
        u32 temp;
        int count = UDELAY_COUNT;

        while (count) {
                temp = ql_read32(qdev, reg);

                /* check for errors */
                if (temp & err_bit) {
                        netif_alert(qdev, probe, qdev->ndev,
                                    "register 0x%.08x access error, value = 0x%.08x!\n",
                                    reg, temp);
                        return -EIO;
                } else if (temp & bit)
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        netif_alert(qdev, probe, qdev->ndev,
                    "Timed out waiting for reg %x to come ready.\n", reg);
        return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip.  This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
        int count = UDELAY_COUNT;
        u32 temp;

        while (count) {
                temp = ql_read32(qdev, CFG);
                if (temp & CFG_LE)
                        return -EIO;
                if (!(temp & bit))
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw.  Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
                 u16 q_id)
{
        u64 map;
        int status = 0;
        int direction;
        u32 mask;
        u32 value;

        direction =
            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
            PCI_DMA_FROMDEVICE;

        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
                netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
                return -ENOMEM;
        }

        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
        if (status) {
                /* Don't leak the DMA mapping on the early-exit path. */
                pci_unmap_single(qdev->pdev, map, size, direction);
                return status;
        }

        status = ql_wait_cfg(qdev, bit);
        if (status) {
                netif_err(qdev, ifup, qdev->ndev,
                          "Timed out waiting for CFG to come ready.\n");
                goto exit;
        }

        ql_write32(qdev, ICB_L, (u32) map);
        ql_write32(qdev, ICB_H, (u32) (map >> 32));

        mask = CFG_Q_MASK | (bit << 16);
        value = bit | (q_id << CFG_Q_SHIFT);
        ql_write32(qdev, CFG, (mask | value));

        /*
         * Wait for the bit to clear after signaling hw.
         */
        status = ql_wait_cfg(qdev, bit);
exit:
        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
        pci_unmap_single(qdev->pdev, map, size, direction);
        return status;
}
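
/* Usage sketch (illustrative, not a new caller): downloading a completion
 * queue init control block built elsewhere in the driver would look like:
 *
 *      status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *                            CFG_LCQ, rx_ring->cq_id);
 *
 * The CFG_L* bit selects the DMA direction and block type, and q_id tells
 * the chip which queue instance the block initializes.
 */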

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                        u32 *value)
{
        u32 offset = 0;
        int status;

        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
        {
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset++) |     /* offset */
                           (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                           MAC_ADDR_ADR | MAC_ADDR_RS | type);  /* type */
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                if (status)
                        goto exit;
                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset++) |     /* offset */
                           (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                           MAC_ADDR_ADR | MAC_ADDR_RS | type);  /* type */
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                if (status)
                        goto exit;
                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                if (type == MAC_ADDR_TYPE_CAM_MAC) {
                        status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
                                                 MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |  /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
                                                 MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                }
                break;
        }
        case MAC_ADDR_TYPE_VLAN:
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                netif_crit(qdev, ifup, qdev->ndev,
                           "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                               u16 index)
{
        u32 offset = 0;
        int status = 0;

        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        {
                u32 upper = (addr[0] << 8) | addr[1];
                u32 lower = (addr[2] << 24) | (addr[3] << 16) |
                            (addr[4] << 8) | (addr[5]);

                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                           (index << MAC_ADDR_IDX_SHIFT) |
                           type | MAC_ADDR_E);
                ql_write32(qdev, MAC_ADDR_DATA, lower);
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                           (index << MAC_ADDR_IDX_SHIFT) |
                           type | MAC_ADDR_E);

                ql_write32(qdev, MAC_ADDR_DATA, upper);
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                break;
        }
        case MAC_ADDR_TYPE_CAM_MAC:
        {
                u32 cam_output;
                u32 upper = (addr[0] << 8) | addr[1];
                u32 lower = (addr[2] << 24) | (addr[3] << 16) |
                            (addr[4] << 8) | (addr[5]);

                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Adding %s address %pM at index %d in the CAM.\n",
                             type == MAC_ADDR_TYPE_MULTI_MAC ?
                             "MULTICAST" : "UNICAST",
                             addr, index);

                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset++) |     /* offset */
                           (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                           type);       /* type */
                ql_write32(qdev, MAC_ADDR_DATA, lower);
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset++) |     /* offset */
                           (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                           type);       /* type */
                ql_write32(qdev, MAC_ADDR_DATA, upper);
                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
                           (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                           type);       /* type */
                /* This field should also include the queue id
                 * and possibly the function id.  Right now we hardcode
                 * the route field to NIC core.
                 */
                cam_output = (CAM_OUT_ROUTE_NIC |
                              (qdev->func << CAM_OUT_FUNC_SHIFT) |
                              (0 << CAM_OUT_CQ_ID_SHIFT));
                if (qdev->vlgrp)
                        cam_output |= CAM_OUT_RV;
                /* route to NIC core */
                ql_write32(qdev, MAC_ADDR_DATA, cam_output);
                break;
        }
        case MAC_ADDR_TYPE_VLAN:
        {
                u32 enable_bit = *((u32 *) &addr[0]);
                /* For VLAN, the addr actually holds a bit that
                 * either enables or disables the vlan id we are
                 * addressing.  It's MAC_ADDR_E (bit 27) on or off.
                 */
                netif_info(qdev, ifup, qdev->ndev,
                           "%s VLAN ID %d %s the CAM.\n",
                           enable_bit ? "Adding" : "Removing",
                           index,
                           enable_bit ? "to" : "from");

                status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                if (status)
                        goto exit;
                ql_write32(qdev, MAC_ADDR_IDX, offset |         /* offset */
                           (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                           type |                               /* type */
                           enable_bit); /* enable/disable */
                break;
        }
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                netif_crit(qdev, ifup, qdev->ndev,
                           "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        return status;
}
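
/* Usage sketch: ql_set_mac_addr() below shows the station-address case;
 * a multicast entry (hypothetical index) follows the same pattern under
 * the MAC address semaphore:
 *
 *      status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *      if (status)
 *              return status;
 *      status = ql_set_mac_addr_reg(qdev, (u8 *) mcast_addr,
 *                                   MAC_ADDR_TYPE_MULTI_MAC, index);
 *      ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */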

/* Set or clear MAC address in hardware.  We sometimes
 * have to clear it to prevent wrong frame routing,
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
        int status;
        char zero_mac_addr[ETH_ALEN];
        char *addr;

        if (set) {
                addr = &qdev->current_mac_addr[0];
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Set MAC addr %pM\n", addr);
        } else {
                memset(zero_mac_addr, 0, ETH_ALEN);
                addr = &zero_mac_addr[0];
                netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                             "Clearing MAC address\n");
        }
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
                                     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to init MAC address.\n");
        return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
        netif_err(qdev, link, qdev->ndev, "Link is up.\n");
        netif_carrier_on(qdev->ndev);
        ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
        netif_err(qdev, link, qdev->ndev, "Link is down.\n");
        netif_carrier_off(qdev->ndev);
        ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
        int status = 0;

        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
        if (status)
                goto exit;

        ql_write32(qdev, RT_IDX,
                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
        if (status)
                goto exit;
        *value = ql_read32(qdev, RT_DATA);
exit:
        return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                              int enable)
{
        int status = -EINVAL; /* Return error if no mask match. */
        u32 value = 0;

        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                     "%s %s mask %s the routing reg.\n",
                     enable ? "Adding" : "Removing",
                     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
                     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
                     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
                     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
                     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
                     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
                     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
                     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
                     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
                     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
                     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
                     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
                     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
                     index == RT_IDX_UNUSED013 ? "UNUSED13" :
                     index == RT_IDX_UNUSED014 ? "UNUSED14" :
                     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
                     "(Bad index != RT_IDX)",
                     enable ? "to" : "from");

        switch (mask) {
        case RT_IDX_CAM_HIT:
        {
                value = RT_IDX_DST_CAM_Q |      /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_IP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
        {
                value = RT_IDX_DST_RSS |        /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        case 0:                 /* Clear the E-bit on an entry. */
        {
                value = RT_IDX_DST_DFLT_Q |     /* dest */
                        RT_IDX_TYPE_NICQ |      /* type */
                        (index << RT_IDX_IDX_SHIFT); /* index */
                break;
        }
        default:
                netif_err(qdev, ifup, qdev->ndev,
                          "Mask type %d not yet supported.\n", mask);
                status = -EPERM;
                goto exit;
        }

        if (value) {
                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
                if (status)
                        goto exit;
                value |= (enable ? RT_IDX_E : 0);
                ql_write32(qdev, RT_IDX, value);
                ql_write32(qdev, RT_DATA, enable ? mask : 0);
        }
exit:
        return status;
}
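
/* Usage sketch: a typical ifup path turns on CAM-hit routing so unicast
 * frames reach their RSS queue, then routes broadcasts to the default
 * queue for slow-path handling:
 *
 *      status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
 *                                  RT_IDX_CAM_HIT, 1);
 *      if (!status)
 *              status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *                                          RT_IDX_BCAST, 1);
 */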

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes.  In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags = 0;
        struct intr_context *ctx = qdev->intr_context + intr;

        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
                /* Always enable if we're MSIX multi interrupts and
                 * it's not the default (zeroeth) interrupt.
                 */
                ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
                return var;
        }

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (atomic_dec_and_test(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
        }
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        struct intr_context *ctx;

        /* HW disables for us if we're MSIX multi interrupts and
         * it's not the default (zeroeth) interrupt.
         */
        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
                return 0;

        ctx = qdev->intr_context + intr;
        spin_lock(&qdev->hw_lock);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
        spin_unlock(&qdev->hw_lock);
        return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
        int i;
        for (i = 0; i < qdev->intr_count; i++) {
                /* The enable call does an atomic_dec_and_test
                 * and enables only if the result is zero.
                 * So we precharge it here.
                 */
                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
                             i == 0))
                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
                ql_enable_completion_interrupt(qdev, i);
        }
}
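
/* Sketch of the irq_cnt scheme used above for non-MSI-X operation: the
 * counter is precharged to 1, ql_disable_completion_interrupt() increments
 * it for every outstanding worker, and ql_enable_completion_interrupt()
 * decrements it, so the interrupt is re-armed only when the last worker's
 * enable call drops the count to zero.
 */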

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
        int status, i;
        u16 csum = 0;
        __le16 *flash = (__le16 *)&qdev->flash;

        status = strncmp((char *)&qdev->flash, str, 4);
        if (status) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
                return status;
        }

        for (i = 0; i < size; i++)
                csum += le16_to_cpu(*flash++);

        if (csum)
                netif_err(qdev, ifup, qdev->ndev,
                          "Invalid flash checksum, csum = 0x%.04x.\n", csum);

        return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* This data is stored on flash as an array of
         * __le32.  Since ql_read32() returns cpu endian
         * we need to swap it back.
         */
        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
        return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
        u32 i, size;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset;
        u8 mac_addr[6];

        /* Get flash offset for function and adjust
         * for dword access.
         */
        if (!qdev->port)
                offset = FUNC0_FLASH_OFFSET / sizeof(u32);
        else
                offset = FUNC1_FLASH_OFFSET / sizeof(u32);

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        size = sizeof(struct flash_params_8000) / sizeof(u32);
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        netif_err(qdev, ifup, qdev->ndev,
                                  "Error reading flash.\n");
                        goto exit;
                }
        }

        status = ql_validate_flash(qdev,
                                   sizeof(struct flash_params_8000) / sizeof(u16),
                                   "8000");
        if (status) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }

        /* Extract either manufacturer or BOFM modified
         * MAC address.
         */
        if (qdev->flash.flash_params_8000.data_type1 == 2)
                memcpy(mac_addr,
                       qdev->flash.flash_params_8000.mac_addr1,
                       qdev->ndev->addr_len);
        else
                memcpy(mac_addr,
                       qdev->flash.flash_params_8000.mac_addr,
                       qdev->ndev->addr_len);

        if (!is_valid_ether_addr(mac_addr)) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
                status = -EINVAL;
                goto exit;
        }

        memcpy(qdev->ndev->dev_addr,
               mac_addr,
               qdev->ndev->addr_len);

exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
        int i;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset = 0;
        u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

        /* Second function's parameters follow the first
         * function's.
         */
        if (qdev->port)
                offset = size;

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        netif_err(qdev, ifup, qdev->ndev,
                                  "Error reading flash.\n");
                        goto exit;
                }
        }

        status = ql_validate_flash(qdev,
                                   sizeof(struct flash_params_8012) / sizeof(u16),
                                   "8012");
        if (status) {
                netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }

        if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
                status = -EINVAL;
                goto exit;
        }

        memcpy(qdev->ndev->dev_addr,
               qdev->flash.flash_params_8012.mac_addr,
               qdev->ndev->addr_len);

exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

/* XGMAC registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
        int status;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                return status;
        /* write the data to the data reg */
        ql_write32(qdev, XGMAC_DATA, data);
        /* trigger the write */
        ql_write32(qdev, XGMAC_ADDR, reg);
        return status;
}

/* XGMAC registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                                 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* get the data */
        *data = ql_read32(qdev, XGMAC_DATA);
exit:
        return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
        int status = 0;
        u32 hi = 0;
        u32 lo = 0;

        status = ql_read_xgmac_reg(qdev, reg, &lo);
        if (status)
                goto exit;

        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
        if (status)
                goto exit;

        *data = (u64) lo | ((u64) hi << 32);

exit:
        return status;
}
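
/* Note: the two 32-bit halves above are read back-to-back rather than
 * atomically, so a counter that rolls over between the two reads can
 * return a momentarily torn value; for slow-moving statistics counters
 * this is presumably an accepted trade-off.
 */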

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
        int status;
        /*
         * Get MPI firmware version for driver banner
         * and ethtool info.
         */
        status = ql_mb_about_fw(qdev);
        if (status)
                goto exit;
        status = ql_mb_get_fw_state(qdev);
        if (status)
                goto exit;
        /* Wake up a worker to get/set the TX/RX frame sizes. */
        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
        return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
        int status = 0;
        u32 data;

        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
                /* Another function has the semaphore, so
                 * wait for the port init bit to come ready.
                 */
                netif_info(qdev, link, qdev->ndev,
                           "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
                if (status) {
                        netif_crit(qdev, link, qdev->ndev,
                                   "Port initialize timed out.\n");
                }
                return status;
        }

        netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
        /* Set the core reset. */
        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
        if (status)
                goto end;
        data |= GLOBAL_CFG_RESET;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Clear the core reset and turn on jumbo for receiver. */
        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
        data |= GLOBAL_CFG_TX_STAT_EN;
        data |= GLOBAL_CFG_RX_STAT_EN;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Enable transmitter, and clear its reset. */
        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
        if (status)
                goto end;
        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
        data |= TX_CFG_EN;      /* Enable the transmitter. */
        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
        if (status)
                goto end;

        /* Enable receiver and clear its reset. */
        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
        if (status)
                goto end;
        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
        data |= RX_CFG_EN;      /* Enable the receiver. */
        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
        if (status)
                goto end;

        /* Turn on jumbo. */
        status =
            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
        if (status)
                goto end;
        status =
            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
        if (status)
                goto end;

        /* Signal to the world that the port is enabled. */
        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
        return PAGE_SIZE << qdev->lbq_buf_order;
}
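
/* Worked example (assuming 4KB pages and a 2KB lbq_buf_size): with
 * lbq_buf_order = 1 the block is 8KB, which ql_get_next_chunk() below
 * carves into four 2KB receive chunks before allocating a fresh page.
 */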

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
        rx_ring->lbq_curr_idx++;
        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                rx_ring->lbq_curr_idx = 0;
        rx_ring->lbq_free_cnt++;
        return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
                                          struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

        pci_dma_sync_single_for_cpu(qdev->pdev,
                                    dma_unmap_addr(lbq_desc, mapaddr),
                                    rx_ring->lbq_buf_size,
                                    PCI_DMA_FROMDEVICE);

        /* If it's the last chunk of our master page then
         * we unmap it.
         */
        if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
            == ql_lbq_block_size(qdev))
                pci_unmap_page(qdev->pdev,
                               lbq_desc->p.pg_chunk.map,
                               ql_lbq_block_size(qdev),
                               PCI_DMA_FROMDEVICE);
        return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
        rx_ring->sbq_curr_idx++;
        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
                rx_ring->sbq_curr_idx = 0;
        rx_ring->sbq_free_cnt++;
        return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
        rx_ring->cnsmr_idx++;
        rx_ring->curr_entry++;
        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
                rx_ring->cnsmr_idx = 0;
                rx_ring->curr_entry = rx_ring->cq_base;
        }
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
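
/* The consumer index is pushed to the hardware doorbell only via
 * ql_write_cq_idx(), typically once per serviced batch of completions
 * rather than per entry, which keeps MMIO writes off the per-packet path.
 */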

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                             struct bq_desc *lbq_desc)
{
        if (!rx_ring->pg_chunk.page) {
                u64 map;
                rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
                                                     GFP_ATOMIC,
                                                     qdev->lbq_buf_order);
                if (unlikely(!rx_ring->pg_chunk.page)) {
                        netif_err(qdev, drv, qdev->ndev,
                                  "page allocation failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.offset = 0;
                map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
                                   0, ql_lbq_block_size(qdev),
                                   PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                        __free_pages(rx_ring->pg_chunk.page,
                                     qdev->lbq_buf_order);
                        netif_err(qdev, drv, qdev->ndev,
                                  "PCI mapping failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.map = map;
                rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
        }

        /* Copy the current master pg_chunk info
         * to the current descriptor.
         */
        lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

        /* Adjust the master page chunk for next
         * buffer get.
         */
        rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
                rx_ring->pg_chunk.page = NULL;
                lbq_desc->p.pg_chunk.last_flag = 1;
        } else {
                rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
                get_page(rx_ring->pg_chunk.page);
                lbq_desc->p.pg_chunk.last_flag = 0;
        }
        return 0;
}
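
/* Note on page reuse: every chunk handed out except the last takes an
 * extra get_page() reference, so the master page is freed only after the
 * stack releases all of its chunks and ql_get_curr_lchunk() has unmapped
 * the final one.
 */
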
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        u32 clean_idx = rx_ring->lbq_clean_idx;
        u32 start_idx = clean_idx;
        struct bq_desc *lbq_desc;
        u64 map;
        int i;

        while (rx_ring->lbq_free_cnt > 32) {
                for (i = 0; i < 16; i++) {
                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                                     "lbq: try cleaning clean_idx = %d.\n",
                                     clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
                                netif_err(qdev, ifup, qdev->ndev,
                                          "Could not get a page chunk.\n");
                                return;
                        }

                        map = lbq_desc->p.pg_chunk.map +
                                lbq_desc->p.pg_chunk.offset;
                        dma_unmap_addr_set(lbq_desc, mapaddr, map);
                        dma_unmap_len_set(lbq_desc, maplen,
                                          rx_ring->lbq_buf_size);
                        *lbq_desc->addr = cpu_to_le64(map);

                        pci_dma_sync_single_for_device(qdev->pdev, map,
                                                       rx_ring->lbq_buf_size,
                                                       PCI_DMA_FROMDEVICE);
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
                                clean_idx = 0;
                }

                rx_ring->lbq_clean_idx = clean_idx;
                rx_ring->lbq_prod_idx += 16;
                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
                        rx_ring->lbq_prod_idx = 0;
                rx_ring->lbq_free_cnt -= 16;
        }

        if (start_idx != clean_idx) {
                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                             "lbq: updating prod idx = %d.\n",
                             rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
        }
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        u32 clean_idx = rx_ring->sbq_clean_idx;
        u32 start_idx = clean_idx;
        struct bq_desc *sbq_desc;
        u64 map;
        int i;

        while (rx_ring->sbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        sbq_desc = &rx_ring->sbq[clean_idx];
                        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                                     "sbq: try cleaning clean_idx = %d.\n",
                                     clean_idx);
                        if (sbq_desc->p.skb == NULL) {
                                netif_printk(qdev, rx_status, KERN_DEBUG,
                                             qdev->ndev,
                                             "sbq: getting new skb for index %d.\n",
                                             sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
                                                     SMALL_BUFFER_SIZE);
                                if (sbq_desc->p.skb == NULL) {
                                        netif_err(qdev, probe, qdev->ndev,
                                                  "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
                                                     rx_ring->sbq_buf_size,
                                                     PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        netif_err(qdev, ifup, qdev->ndev,
                                                  "PCI mapping failed.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        dev_kfree_skb_any(sbq_desc->p.skb);
                                        sbq_desc->p.skb = NULL;
                                        return;
                                }
                                dma_unmap_addr_set(sbq_desc, mapaddr, map);
                                dma_unmap_len_set(sbq_desc, maplen,
                                                  rx_ring->sbq_buf_size);
                                *sbq_desc->addr = cpu_to_le64(map);
                        }

                        clean_idx++;
                        if (clean_idx == rx_ring->sbq_len)
                                clean_idx = 0;
                }
                rx_ring->sbq_clean_idx = clean_idx;
                rx_ring->sbq_prod_idx += 16;
                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
                        rx_ring->sbq_prod_idx = 0;
                rx_ring->sbq_free_cnt -= 16;
        }

        if (start_idx != clean_idx) {
                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                             "sbq: updating prod idx = %d.\n",
                             rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);
        }
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
                                    struct rx_ring *rx_ring)
{
        ql_update_sbq(qdev, rx_ring);
        ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
                          struct tx_ring_desc *tx_ring_desc, int mapped)
{
        int i;
        for (i = 0; i < mapped; i++) {
                if (i == 0 || (i == 7 && mapped > 7)) {
                        /*
                         * Unmap the skb->data area, or the
                         * external sglist (AKA the Outbound
                         * Address List (OAL)).
                         * If it's the zeroeth element, then it's
                         * the skb->data area.  If it's the 7th
                         * element and there are more than 6 frags,
                         * then it's an OAL.
                         */
                        if (i == 7) {
                                netif_printk(qdev, tx_done, KERN_DEBUG,
                                             qdev->ndev,
                                             "unmapping OAL area.\n");
                        }
                        pci_unmap_single(qdev->pdev,
                                         dma_unmap_addr(&tx_ring_desc->map[i],
                                                        mapaddr),
                                         dma_unmap_len(&tx_ring_desc->map[i],
                                                       maplen),
                                         PCI_DMA_TODEVICE);
                } else {
                        netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
                                     "unmapping frag %d.\n", i);
                        pci_unmap_page(qdev->pdev,
                                       dma_unmap_addr(&tx_ring_desc->map[i],
                                                      mapaddr),
                                       dma_unmap_len(&tx_ring_desc->map[i],
                                                     maplen), PCI_DMA_TODEVICE);
                }
        }
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
                       struct ob_mac_iocb_req *mac_iocb_ptr,
                       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
        int len = skb_headlen(skb);
        dma_addr_t map;
        int frag_idx, err, map_idx = 0;
        struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
        int frag_cnt = skb_shinfo(skb)->nr_frags;

        if (frag_cnt) {
                netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
                             "frag_cnt = %d.\n", frag_cnt);
        }
        /*
         * Map the skb buffer first.
         */
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

        err = pci_dma_mapping_error(qdev->pdev, map);
        if (err) {
                netif_err(qdev, tx_queued, qdev->ndev,
                          "PCI mapping failed with error: %d\n", err);

                return NETDEV_TX_BUSY;
        }

        tbd->len = cpu_to_le32(len);
        tbd->addr = cpu_to_le64(map);
        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
        map_idx++;

        /*
         * This loop fills the remainder of the 8 address descriptors
         * in the IOCB.  If there are more than 7 fragments, then the
         * eighth address desc will point to an external list (OAL).
         * When this happens, the remainder of the frags will be stored
         * in this list.
         */
        for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
                tbd++;
                if (frag_idx == 6 && frag_cnt > 7) {
                        /* Let's tack on an sglist.
                         * Our control block will now
                         * look like this:
                         * iocb->seg[0] = skb->data
                         * iocb->seg[1] = frag[0]
                         * iocb->seg[2] = frag[1]
                         * iocb->seg[3] = frag[2]
                         * iocb->seg[4] = frag[3]
                         * iocb->seg[5] = frag[4]
                         * iocb->seg[6] = frag[5]
                         * iocb->seg[7] = ptr to OAL (external sglist)
                         * oal->seg[0] = frag[6]
                         * oal->seg[1] = frag[7]
                         * oal->seg[2] = frag[8]
                         * oal->seg[3] = frag[9]
                         * oal->seg[4] = frag[10]
                         * etc...
                         */
                        /* Tack on the OAL in the eighth segment of IOCB. */
                        map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
                                             sizeof(struct oal),
                                             PCI_DMA_TODEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
                                netif_err(qdev, tx_queued, qdev->ndev,
                                          "PCI mapping outbound address list with error: %d\n",
                                          err);
                                goto map_error;
                        }

                        tbd->addr = cpu_to_le64(map);
                        /*
                         * The length is the number of fragments
                         * that remain to be mapped times the length
                         * of our sglist (OAL).
                         */
                        tbd->len =
                            cpu_to_le32((sizeof(struct tx_buf_desc) *
                                         (frag_cnt - frag_idx)) | TX_DESC_C);
                        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
                                           map);
                        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                                          sizeof(struct oal));
                        tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
                        map_idx++;
                }

                map = pci_map_page(qdev->pdev, frag->page,
                                   frag->page_offset, frag->size,
                                   PCI_DMA_TODEVICE);

                err = pci_dma_mapping_error(qdev->pdev, map);
                if (err) {
                        netif_err(qdev, tx_queued, qdev->ndev,
                                  "PCI mapping frags failed with error: %d.\n",
                                  err);
                        goto map_error;
                }

                tbd->addr = cpu_to_le64(map);
                tbd->len = cpu_to_le32(frag->size);
                dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
                dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                                  frag->size);
        }
        /* Save the number of segments we've mapped. */
        tx_ring_desc->map_cnt = map_idx;
        /* Terminate the last segment. */
        tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
        return NETDEV_TX_OK;

map_error:
        /*
         * If the first frag mapping failed, then map_idx will be zero.
         * This causes the unmap of the skb->data area.  Otherwise
         * we pass in the number of frags that mapped successfully
         * so they can be unmapped.
         */
        ql_unmap_send(qdev, tx_ring_desc, map_idx);
        return NETDEV_TX_BUSY;
}
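
/* Worked example (hypothetical 10-frag skb): seg[0] maps skb->data,
 * seg[1]..seg[6] map frags 0-5, seg[7] points at the OAL, and frags 6-9
 * land in oal->seg[0..3].  The OAL descriptor's length is encoded as
 * 4 * sizeof(struct tx_buf_desc) with TX_DESC_C set, and the very last
 * descriptor written gets TX_DESC_E as the terminator.
 */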

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                                       struct rx_ring *rx_ring,
                                       struct ib_mac_iocb_rsp *ib_mac_rsp,
                                       u32 length,
                                       u16 vlan_id)
{
        struct sk_buff *skb;
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct skb_frag_struct *rx_frag;
        int nr_frags;
        struct napi_struct *napi = &rx_ring->napi;

        napi->dev = qdev->ndev;

        skb = napi_get_frags(napi);
        if (!skb) {
                netif_err(qdev, drv, qdev->ndev,
                          "Couldn't get an skb, exiting.\n");
                rx_ring->rx_dropped++;
                put_page(lbq_desc->p.pg_chunk.page);
                return;
        }
        prefetch(lbq_desc->p.pg_chunk.va);
        rx_frag = skb_shinfo(skb)->frags;
        nr_frags = skb_shinfo(skb)->nr_frags;
        rx_frag += nr_frags;
        rx_frag->page = lbq_desc->p.pg_chunk.page;
        rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
        rx_frag->size = length;

        skb->len += length;
        skb->data_len += length;
        skb->truesize += length;
        skb_shinfo(skb)->nr_frags++;

        rx_ring->rx_packets++;
        rx_ring->rx_bytes += length;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rx_ring->cq_id);
        if (qdev->vlgrp && (vlan_id != 0xffff))
                vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
        else
                napi_gro_frags(napi);
}
1515
1516/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001517static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1518 struct rx_ring *rx_ring,
1519 struct ib_mac_iocb_rsp *ib_mac_rsp,
1520 u32 length,
1521 u16 vlan_id)
1522{
1523 struct net_device *ndev = qdev->ndev;
1524 struct sk_buff *skb = NULL;
1525 void *addr;
1526 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1527 struct napi_struct *napi = &rx_ring->napi;
1528
1529 skb = netdev_alloc_skb(ndev, length);
1530 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001531 netif_err(qdev, drv, qdev->ndev,
1532 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001533 rx_ring->rx_dropped++;
1534 put_page(lbq_desc->p.pg_chunk.page);
1535 return;
1536 }
1537
1538 addr = lbq_desc->p.pg_chunk.va;
1539 prefetch(addr);
1540
1541
1542 /* Frame error, so drop the packet. */
1543 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001544 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001545 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001546 rx_ring->rx_errors++;
1547 goto err_out;
1548 }
1549
1550 /* The max framesize filter on this chip is set higher than
1551 * MTU since FCoE uses 2k frames.
1552 */
1553 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001554 netif_err(qdev, drv, qdev->ndev,
1555 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001556 rx_ring->rx_dropped++;
1557 goto err_out;
1558 }
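	/* Copy the ethernet header out of the page chunk into the
	 * skb's linear area, then chain the rest of the chunk on
	 * as a page frag.
	 */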
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
				length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
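	/* The small-buffer skb stays owned by the ring so it can be
	 * reused; copy the frame into the freshly allocated skb
	 * before handing it up the stack.
	 */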
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

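	/* The IOCB flags describe where the chip placed the frame:
	 * header split into a small buffer, data in a small buffer,
	 * data in a single large-buffer page chunk, or data in a
	 * chain of large buffers. Build the skb accordingly.
	 */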
	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
			ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr
						    (sbq_desc, mapaddr),
						    dma_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr
						       (sbq_desc,
							mapaddr),
						       dma_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc,
							mapaddr),
					 dma_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc,
						      mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
						lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
			(vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
			(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
			(vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
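	/* A vlan_id of 0xffff means the frame carried no VLAN tag. */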

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
						length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
						vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to tell the reset process
	 * that we are in fatal error recovery rather than a
	 * normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netif_err(qdev, link, qdev->ndev,
			  "Multiple CAM hits lookup occurred.\n");
		netif_err(qdev, drv, qdev->ndev,
			  "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netif_err(qdev, rx_err, qdev->ndev,
			  "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			  ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
					trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
					rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	if (ql_set_mac_addr_reg
		(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	if (ql_set_mac_addr_reg
		(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

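/* Replay the current VLAN configuration into the hardware,
 * typically when the adapter is brought back up after a reset
 * and the chip's VLAN filter state has been lost.
 */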
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);

	if (qdev->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(qdev->vlgrp, vid))
				continue;
			qlge_vlan_rx_add_vid(qdev->ndev, vid);
		}
	}
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

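	/* irq_cnt is non-zero while this vector's interrupt is masked,
	 * so on a shared line an interrupt arriving now was raised by
	 * another device.
	 */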
	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netif_err(qdev, intr, qdev->ndev,
			  "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netif_err(qdev, intr, qdev->ndev,
			  "Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
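		/* Seed the TCP checksum with the pseudo-header sum so
		 * the chip only has to fold in the payload for each
		 * segment it emits.
		 */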
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
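	/* len is the TCP/UDP segment length (IP total length minus
	 * the IP header), which feeds the pseudo-header checksum.
	 */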
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
2559
Stephen Hemminger613573252009-08-31 19:50:58 +00002560static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002561{
2562 struct tx_ring_desc *tx_ring_desc;
2563 struct ob_mac_iocb_req *mac_iocb_ptr;
2564 struct ql_adapter *qdev = netdev_priv(ndev);
2565 int tso;
2566 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002567 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002568
2569 tx_ring = &qdev->tx_ring[tx_ring_idx];
2570
Ron Mercer74c50b42009-03-09 10:59:27 +00002571 if (skb_padto(skb, ETH_ZLEN))
2572 return NETDEV_TX_OK;
2573
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002574 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002575 netif_info(qdev, tx_queued, qdev->ndev,
2576 "%s: shutting down tx queue %d du to lack of resources.\n",
2577 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002578 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002579 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002580 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002581 return NETDEV_TX_BUSY;
2582 }
2583 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2584 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002585 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002586
2587 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2588 mac_iocb_ptr->tid = tx_ring_desc->index;
2589 /* We use the upper 32-bits to store the tx queue for this IO.
2590 * When we get the completion we can use it to establish the context.
2591 */
2592 mac_iocb_ptr->txq_idx = tx_ring_idx;
2593 tx_ring_desc->skb = skb;
2594
2595 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2596
Jesse Grosseab6d182010-10-20 13:56:03 +00002597 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002598 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2599 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002600 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2601 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2602 }
2603 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2604 if (tso < 0) {
2605 dev_kfree_skb_any(skb);
2606 return NETDEV_TX_OK;
2607 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2608 ql_hw_csum_setup(skb,
2609 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2610 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002611 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2612 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002613 netif_err(qdev, tx_queued, qdev->ndev,
2614 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002615 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002616 return NETDEV_TX_BUSY;
2617 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002618 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2619 tx_ring->prod_idx++;
2620 if (tx_ring->prod_idx == tx_ring->wq_len)
2621 tx_ring->prod_idx = 0;
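	/* Make sure the IOCB is fully written to memory before the
	 * doorbell write below makes it visible to the hardware.
	 */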
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

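/* The shadow register areas are one DMA-coherent page each. The
 * chip writes queue index updates there so the driver can read
 * the current indices from memory (see ql_read_sh_reg() above)
 * instead of doing an MMIO read per poll.
 */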
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;

	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{

	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
2968
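/* Unmap and free any skbs still outstanding on the TX rings,
 * e.g. transmits that were in flight when the interface was
 * taken down.
 */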
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

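/* Allocate the shadow register area plus every rx and tx ring's
 * resources.  A partial allocation is unwound with
 * ql_free_mem_resources().
 */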
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

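/* Set up the tx ring control block, "wqicb" (Work Queue
 * Initialization Control Block), and pass it to the chip.
 */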
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded WQICB.\n");
	return err;
}

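/* Undo whichever interrupt mode ql_enable_msix() settled on and
 * release the MSI-X entry table if one was allocated.
 */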
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service each one. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			(1 << qdev->rx_ring[qdev->rss_ring_count +
			(vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This is in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

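/* Free every IRQ that ql_request_irq() hooked, then back out of
 * MSI-X/MSI mode.
 */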
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			} else {
				netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
					     "Hooked intr %d, queue type %s, with name %s.\n",
					     i,
					     qdev->rx_ring[i].type == DEFAULT_Q ?
					     "DEFAULT_Q" :
					     qdev->rx_ring[i].type == TX_Q ?
					     "TX_Q" :
					     qdev->rx_ring[i].type == RX_Q ?
					     "RX_Q" : "",
					     intr_context->name);
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

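/* Build and download the RICB (RSS Initialization Control Block):
 * the hash keys plus a 1024-entry indirection table that spreads
 * flows across the RSS rings.  Note that the table fill below
 * assumes rss_ring_count is a power of two.
 */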
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded RICB.\n");
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

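/* Program the chip from scratch: error-halt policy, default queue
 * and VLAN behavior, the MPI interrupt, function control and
 * header splitting, then the rx rings, RSS, tx rings, port and
 * CAM/routing tables, and finally NAPI on the RSS rings.
 */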
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which
	 * the packet arrived, in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Enabling NAPI for rx_ring[%d].\n", i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set, skip the mailbox commands and
	 * just clear the bit; otherwise we are in the normal reset
	 * process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

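/* Program the MPI firmware's Wake-on-LAN mode from qdev->wol.
 * Only magic-packet wake is supported; any other WAKE_* flag is
 * rejected with -EINVAL.
 */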
static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

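/* net_device stop (ndo_stop) handler: bring the adapter down and
 * release its rings and IRQs.
 */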
static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit pci_channel_io_perm_failure
	 * failure condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

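/* Size and type every rx and tx ring before resources are
 * allocated: ideally one RSS ring and one TX ring per CPU,
 * capped by the number of MSI-X vectors actually granted.
 */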
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "lbq_buf_size %d, order = %d\n",
				     rx_ring->lbq_buf_size,
				     qdev->lbq_buf_order);
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

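/* net_device open (ndo_open) handler: reset the chip, size the
 * rings, then allocate resources and bring the adapter up.
 */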
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

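/* Resize the large receive buffers for a new MTU by cycling the
 * adapter down and back up.
 */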
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

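/* MTU change (ndo_change_mtu) handler.  Only the 1500 <-> 9000
 * transitions are supported; anything else returns -EINVAL.
 */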
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev)) {
		return 0;
	}

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

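/* Aggregate the per-ring counters into the netdev stats structure. */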
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

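/* RX filtering is driven by three routing-register slots: one for
 * promiscuous mode, one for all-multicast, and one for exact multicast
 * matches loaded into the MAC address registers.  The promiscuous and
 * all-multi slots are rewritten only when the cached flag bits show a
 * transition.
 */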
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
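/* For example, if the register decoded to nic_func1 == 0 and
 * nic_func2 == 1 (hypothetical values), a device probed as function 0
 * would record function 1 as its alternate and vice versa; a function
 * matching neither field is treated as an error below.
 */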
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

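/* Read the chip's STS register to learn which PCI function this
 * instance is, derive the alternate function and the port number from
 * it, and select the per-port semaphore mask, link/init status bits,
 * mailbox addresses and nic_ops for this device.
 */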
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

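/* One-time PCI setup for a new adapter: enable the device, size the
 * DMA mask, map the register and doorbell BARs, validate the flash and
 * read the MAC address from it, and initialize the workqueue, default
 * ring sizes and coalescing parameters.  On failure the err_out labels
 * unwind via ql_release_all() and pci_disable_device().
 */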
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

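/* Deferrable five-second heartbeat: the STS read forces a PCI
 * transaction so a dead bus is noticed even when the interface is
 * otherwise idle.  Once the channel is reported offline the timer is
 * not re-armed.
 */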
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
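/* Channel states map to recovery actions as follows: io_normal may be
 * recoverable in place; io_frozen detaches the device and requests a
 * slot reset; perm_failure tears everything down and disconnects the
 * device.
 */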
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

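/* Suspend path: detach the netdev and stop the heartbeat timer, bring
 * the adapter down if it was running, arm wake-on-LAN, then save PCI
 * state and drop into the requested low-power state.
 */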
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);