/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/*	NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*	NETIF_MSG_TX_QUEUED | */
/*	NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/*	NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
		return -EINVAL;
	}

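	/* Try to take the semaphore: write the set-bits for this
	 * resource, then read back.  If our set-bits stuck, the
	 * semaphore is ours.
	 */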
	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

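/* Spin on the semaphore, retrying for up to ~3 ms
 * (30 attempts x 100 us) before giving up.
 */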
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly during the initialization
 * process, but is also used in kernel thread contexts such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* don't leak the DMA mapping */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status = ql_wait_reg_rdy(qdev,
							 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
							 MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
				    (addr[4] << 8) | (addr[5]);

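			/* A multicast CAM entry is programmed as two
			 * 32-bit data words - the low four bytes of
			 * the MAC, then the top two - each preceded
			 * by an index-register write.
			 */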
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing,
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init MAC address.\n");
	return status;
}

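/* The link handlers below program or clear our CAM entry on link
 * transitions, so a stale entry cannot steer frames to this
 * function while the link is down (see ql_set_mac_addr() above).
 */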
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

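/* INTR_EN writes use the driver's mask-in-upper-16-bits convention:
 * the high half selects which of the low bits the write affects,
 * so the enable path sets EI and the disable path clears it.
 */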
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

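/* Validate a flash image by checking its 4-byte signature and
 * verifying that the 16-bit words of the image sum to zero.
 */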
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

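/* Read one 32-bit word of flash through the FLASH_ADDR/FLASH_DATA
 * indirect register pair, polling the ready bit around each step.
 */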
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

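/* Read and validate the 8000-series flash parameter block, then
 * extract the MAC address - either the manufacturer's or the
 * BOFM-modified one, depending on data_type1.
 */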
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

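/* Large-buffer queue (lbq) entries are chunks carved from a single
 * "master" compound page of this size; see ql_get_next_chunk() below.
 */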
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
	    == ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

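/* Refill one lbq descriptor with the next chunk of the master
 * page, allocating and DMA-mapping a fresh compound page when
 * the previous one has been fully handed out.
 */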
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

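	/* Refill in bursts of 16 descriptors, and only once at
	 * least 32 are free, so the producer index is always
	 * bumped a chunk at a time.
	 */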
	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring: hand the page
 * chunk straight to GRO as an skb fragment.
 */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (length > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too large, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
			   length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

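/*
 * Layout note for the page path above: the Ethernet header is copied
 * into the skb's linear area so eth_type_trans() and header parsing
 * can read it directly, while the remaining (length - ETH_HLEN) bytes
 * stay in the page chunk and are attached as frag 0. For a 1514-byte
 * frame that means a 14-byte memcpy plus one page reference instead of
 * copying the whole payload.
 */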
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

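/*
 * The function above is a copy-break path: a frame small enough to land
 * in one small buffer is memcpy'd into a freshly allocated skb, and the
 * original small-buffer skb stays attached to the ring descriptor so it
 * can be handed back to the hardware without a new allocation or DMA
 * mapping. Copying a few hundred bytes is cheaper than that churn.
 */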
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

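/*
 * Worked example for ql_realign_skb(), assuming QLGE_SB_PAD is 32 (per
 * the comment above) and NET_IP_ALIGN is 2 (the kernel default; some
 * architectures define it as 0): skb->data and skb->tail both move back
 * by 32 - 2 = 30 bytes and the payload is copied down to that offset,
 * leaving the IP header that follows the 14-byte Ethernet header
 * 4-byte aligned, which is the point of NET_IP_ALIGN.
 */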
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr
						    (sbq_desc, mapaddr),
						    dma_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr
						       (sbq_desc,
							mapaddr),
						       dma_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc,
							mapaddr),
					 dma_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc,
						      mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

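/*
 * Why the __pskb_pull_tail() calls above matter: when a frame arrives
 * entirely in page chunks the skb's linear area is empty, so pulling
 * ETH_HLEN (or VLAN_ETH_HLEN for tagged frames) copies just the
 * link-layer header into the linear area for eth_type_trans() and any
 * parsing that assumes linear data, while the payload stays in frags.
 */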
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

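/*
 * VLAN convention used above (informational): when the IOCB's
 * IB_MAC_IOCB_RSP_V flag is set, the little-endian vlan_id field is
 * masked with IB_MAC_IOCB_RSP_VLAN_MASK to extract the VLAN ID portion
 * of the tag (12 bits in 802.1Q), and that value is passed to the
 * receive helpers; 0xffff, which is not a valid VLAN ID, serves as the
 * "no tag" sentinel they all test against.
 */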
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

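/*
 * Worked example for the wake threshold above: with wq_len = 128
 * entries, the subqueue is restarted only once more than 128 / 4 = 32
 * descriptors are free again. Waking at 25% free rather than on the
 * first completion keeps the queue from bouncing between stopped and
 * started on every reclaimed descriptor.
 */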
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

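/*
 * NAPI contract note: the budget cap applies only to this inbound ring.
 * The function returns the number of completions processed and the poll
 * routine below compares that against the NAPI budget to decide whether
 * to re-arm interrupts or stay in polling mode. The rmb() before each
 * dispatch orders the reads of the completion entry against the
 * producer-index read that said the entry is valid.
 */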
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

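/*
 * Poll-ordering note: TX completion rings are drained first and do not
 * count against the NAPI budget, since reclaiming TX descriptors is
 * cheap and frees memory back to the stack; only the inbound (RSS)
 * ring's work is measured. The vector's interrupt is re-enabled via
 * napi_complete() only when work_done < budget, otherwise the kernel
 * keeps polling this ring.
 */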
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
			     "Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
	netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	}
}

static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	}
}

static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	__qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

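/*
 * Shared-IRQ note: in legacy INTx mode the line may be shared with
 * other devices, so qlge_isr() first consults irq_cnt, a counter the
 * driver's own interrupt enable/disable paths maintain, and bails out
 * when the interrupt cannot be ours. Returning IRQ_NONE rather than
 * IRQ_HANDLED is what lets the kernel pass a shared line on to the
 * other handlers and detect screaming interrupts.
 */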
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

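/*
 * TSO checksum seeding, spelled out: for a gso skb the code above
 * zeroes iph->check and stores the complemented pseudo-header checksum,
 * csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) computed with a
 * zero length, in the TCP header. The hardware then accounts for the
 * per-segment lengths and finishes both the IP and TCP checksums for
 * every segment it cuts from the large send.
 */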
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

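/*
 * Worked example for the offset packing above, assuming the common
 * layout of a 14-byte Ethernet header and a 20-byte IPv4 header with
 * no VLAN tag: skb_network_offset() is 14 and skb_transport_offset()
 * is 34, so net_trans_offset carries 14 in its low bits and 34 shifted
 * up by OB_MAC_TRANSPORT_HDR_SHIFT, while len = tot_len - (ihl << 2)
 * is the L4 header-plus-payload length fed to the pseudo-header
 * checksum.
 */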
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}

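/*
 * Doorbell ordering note: the wmb() in qlge_send() ensures the IOCB
 * contents written above it are visible to the device before the
 * producer-index doorbell write that tells the chip to fetch them, and
 * prod_idx wraps to 0 at wq_len to match the hardware's view of the
 * ring.
 */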
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;

	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

2862static void ql_free_rx_resources(struct ql_adapter *qdev,
2863 struct rx_ring *rx_ring)
2864{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002865 /* Free the small buffer queue. */
2866 if (rx_ring->sbq_base) {
2867 pci_free_consistent(qdev->pdev,
2868 rx_ring->sbq_size,
2869 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2870 rx_ring->sbq_base = NULL;
2871 }
2872
2873 /* Free the small buffer queue control blocks. */
2874 kfree(rx_ring->sbq);
2875 rx_ring->sbq = NULL;
2876
2877 /* Free the large buffer queue. */
2878 if (rx_ring->lbq_base) {
2879 pci_free_consistent(qdev->pdev,
2880 rx_ring->lbq_size,
2881 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2882 rx_ring->lbq_base = NULL;
2883 }
2884
2885 /* Free the large buffer queue control blocks. */
2886 kfree(rx_ring->lbq);
2887 rx_ring->lbq = NULL;
2888
2889 /* Free the rx queue. */
2890 if (rx_ring->cq_base) {
2891 pci_free_consistent(qdev->pdev,
2892 rx_ring->cq_size,
2893 rx_ring->cq_base, rx_ring->cq_base_dma);
2894 rx_ring->cq_base = NULL;
2895 }
2896}
2897
2898/* Allocate queues and buffers for this completion queue based
2899 * on the values in the parameter structure. */
2900static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2901 struct rx_ring *rx_ring)
2902{
2904 /*
2905 * Allocate the completion queue for this rx_ring.
2906 */
2907 rx_ring->cq_base =
2908 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2909 &rx_ring->cq_base_dma);
2910
2911 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002912 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002913 return -ENOMEM;
2914 }
2915
2916 if (rx_ring->sbq_len) {
2917 /*
2918 * Allocate small buffer queue.
2919 */
2920 rx_ring->sbq_base =
2921 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2922 &rx_ring->sbq_base_dma);
2923
2924 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002925 netif_err(qdev, ifup, qdev->ndev,
2926 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002927 goto err_mem;
2928 }
2929
2930 /*
2931 * Allocate small buffer queue control blocks.
2932 */
2933 rx_ring->sbq =
2934 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2935 GFP_KERNEL);
2936 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002937 netif_err(qdev, ifup, qdev->ndev,
2938 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002939 goto err_mem;
2940 }
2941
Ron Mercer4545a3f2009-02-23 10:42:17 +00002942 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002943 }
2944
2945 if (rx_ring->lbq_len) {
2946 /*
2947 * Allocate large buffer queue.
2948 */
2949 rx_ring->lbq_base =
2950 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2951 &rx_ring->lbq_base_dma);
2952
2953 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002954 netif_err(qdev, ifup, qdev->ndev,
2955 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002956 goto err_mem;
2957 }
2958 /*
2959 * Allocate large buffer queue control blocks.
2960 */
2961 rx_ring->lbq =
2962 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2963 GFP_KERNEL);
2964 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002965 netif_err(qdev, ifup, qdev->ndev,
2966 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002967 goto err_mem;
2968 }
2969
Ron Mercer4545a3f2009-02-23 10:42:17 +00002970 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002971 }
2972
2973 return 0;
2974
2975err_mem:
2976 ql_free_rx_resources(qdev, rx_ring);
2977 return -ENOMEM;
2978}
2979
2980static void ql_tx_ring_clean(struct ql_adapter *qdev)
2981{
2982 struct tx_ring *tx_ring;
2983 struct tx_ring_desc *tx_ring_desc;
2984 int i, j;
2985
2986 /*
2987 * Loop through all queues and free
2988 * any resources.
2989 */
2990 for (j = 0; j < qdev->tx_ring_count; j++) {
2991 tx_ring = &qdev->tx_ring[j];
2992 for (i = 0; i < tx_ring->wq_len; i++) {
2993 tx_ring_desc = &tx_ring->q[i];
2994 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002995 netif_err(qdev, ifdown, qdev->ndev,
2996 "Freeing lost SKB %p, from queue %d, index %d.\n",
2997 tx_ring_desc->skb, j,
2998 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002999 ql_unmap_send(qdev, tx_ring_desc,
3000 tx_ring_desc->map_cnt);
3001 dev_kfree_skb(tx_ring_desc->skb);
3002 tx_ring_desc->skb = NULL;
3003 }
3004 }
3005 }
3006}
3007
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003008static void ql_free_mem_resources(struct ql_adapter *qdev)
3009{
3010 int i;
3011
3012 for (i = 0; i < qdev->tx_ring_count; i++)
3013 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3014 for (i = 0; i < qdev->rx_ring_count; i++)
3015 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3016 ql_free_shadow_space(qdev);
3017}
3018
3019static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3020{
3021 int i;
3022
3023 /* Allocate space for our shadow registers and such. */
3024 if (ql_alloc_shadow_space(qdev))
3025 return -ENOMEM;
3026
3027 for (i = 0; i < qdev->rx_ring_count; i++) {
3028 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003029 netif_err(qdev, ifup, qdev->ndev,
3030 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003031 goto err_mem;
3032 }
3033 }
3034 /* Allocate tx queue resources */
3035 for (i = 0; i < qdev->tx_ring_count; i++) {
3036 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003037 netif_err(qdev, ifup, qdev->ndev,
3038 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003039 goto err_mem;
3040 }
3041 }
3042 return 0;
3043
3044err_mem:
3045 ql_free_mem_resources(qdev);
3046 return -ENOMEM;
3047}
3048
3049/* Set up the rx ring control block and pass it to the chip.
3050 * The control block is defined as
3051 * "Completion Queue Initialization Control Block", or cqicb.
3052 */
3053static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3054{
3055 struct cqicb *cqicb = &rx_ring->cqicb;
3056 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003057 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003058 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003059 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003060 void __iomem *doorbell_area =
3061 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3062 int err = 0;
3063 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003064 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003065 __le64 *base_indirect_ptr;
3066 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003067
3068 /* Set up the shadow registers for this ring. */
3069 rx_ring->prod_idx_sh_reg = shadow_reg;
3070 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003071 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003072 shadow_reg += sizeof(u64);
3073 shadow_reg_dma += sizeof(u64);
3074 rx_ring->lbq_base_indirect = shadow_reg;
3075 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003076 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3077 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003078 rx_ring->sbq_base_indirect = shadow_reg;
3079 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3080
3081 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003082 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003083 rx_ring->cnsmr_idx = 0;
3084 rx_ring->curr_entry = rx_ring->cq_base;
3085
3086 /* PCI doorbell mem area + 0x04 for valid register */
3087 rx_ring->valid_db_reg = doorbell_area + 0x04;
3088
3089 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003090 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003091
3092 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003093 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003094
3095 memset((void *)cqicb, 0, sizeof(struct cqicb));
3096 cqicb->msix_vect = rx_ring->irq;
3097
Ron Mercer459caf52009-01-04 17:08:11 -08003098 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3099 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003100
Ron Mercer97345522009-01-09 11:31:50 +00003101 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003102
Ron Mercer97345522009-01-09 11:31:50 +00003103 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003104
3105 /*
3106 * Set up the control block load flags.
3107 */
3108 cqicb->flags = FLAGS_LC | /* Load queue base address */
3109 FLAGS_LV | /* Load MSI-X vector */
3110 FLAGS_LI; /* Load irq delay values */
3111 if (rx_ring->lbq_len) {
3112 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003113 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003114 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003115 page_entries = 0;
3116 do {
3117 *base_indirect_ptr = cpu_to_le64(tmp);
3118 tmp += DB_PAGE_SIZE;
3119 base_indirect_ptr++;
3120 page_entries++;
3121 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003122 cqicb->lbq_addr =
3123 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003124 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3125 (u16) rx_ring->lbq_buf_size;
3126 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3127 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3128 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003129 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003130 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003131 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003132 rx_ring->lbq_clean_idx = 0;
3133 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003134 }
3135 if (rx_ring->sbq_len) {
3136 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003137 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003138 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003139 page_entries = 0;
3140 do {
3141 *base_indirect_ptr = cpu_to_le64(tmp);
3142 tmp += DB_PAGE_SIZE;
3143 base_indirect_ptr++;
3144 page_entries++;
3145 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003146 cqicb->sbq_addr =
3147 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003148 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003149 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003150 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3151 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003152 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003153 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003154 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003155 rx_ring->sbq_clean_idx = 0;
3156 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003157 }
3158 switch (rx_ring->type) {
3159 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003160 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3161 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3162 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003163 case RX_Q:
3164 /* Inbound completion handling rx_rings run in
3165 * separate NAPI contexts.
3166 */
3167 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3168 64);
3169 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3170 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3171 break;
3172 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003173 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003175 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003176 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3177 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003178 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3179 CFG_LCQ, rx_ring->cq_id);
3180 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003181 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003182 return err;
3183 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003184 return err;
3185}
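/* A sketch (not built) of the buffer queue indirection set up above:
 * the shadow area receives the DMA address of every DB_PAGE_SIZE-sized
 * page of the queue, and the CQICB points the chip at that page list
 * rather than at the queue itself. The constants are illustrative,
 * assuming DB_PAGE_SIZE is 4096 so one page holds 512 8-byte entries.
 */
#if 0
static void example_fill_indirect(__le64 *shadow, u64 bq_base_dma,
				  int bq_len)
{
	int pages = (bq_len * sizeof(u64) + 4096 - 1) / 4096;
	int n;

	for (n = 0; n < pages; n++)	/* one shadow entry per queue page */
		shadow[n] = cpu_to_le64(bq_base_dma + n * 4096);
}
#endif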
3186
3187static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3188{
3189 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3190 void __iomem *doorbell_area =
3191 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3192 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3193 (tx_ring->wq_id * sizeof(u64));
3194 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3195 (tx_ring->wq_id * sizeof(u64));
3196 int err = 0;
3197
3198 /*
3199 * Assign doorbell registers for this tx_ring.
3200 */
3201 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003202 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003203 tx_ring->prod_idx = 0;
3204 /* TX PCI doorbell mem area + 0x04 */
3205 tx_ring->valid_db_reg = doorbell_area + 0x04;
3206
3207 /*
3208 * Assign shadow registers for this tx_ring.
3209 */
3210 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3211 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3212
3213 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3214 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3215 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3216 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3217 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003218 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003219
Ron Mercer97345522009-01-09 11:31:50 +00003220 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003221
3222 ql_init_tx_ring(qdev, tx_ring);
3223
Ron Mercere3324712009-07-02 06:06:13 +00003224 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003225 (u16) tx_ring->wq_id);
3226 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003227 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003228 return err;
3229 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003230 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3231 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003232 return err;
3233}
3234
3235static void ql_disable_msix(struct ql_adapter *qdev)
3236{
3237 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3238 pci_disable_msix(qdev->pdev);
3239 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3240 kfree(qdev->msi_x_entry);
3241 qdev->msi_x_entry = NULL;
3242 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3243 pci_disable_msi(qdev->pdev);
3244 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3245 }
3246}
3247
Ron Mercera4ab6132009-08-27 11:02:10 +00003248/* We start by trying to get the number of vectors
3249 * stored in qdev->intr_count. If we don't get that
3250 * many then we reduce the count and try again.
3251 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003252static void ql_enable_msix(struct ql_adapter *qdev)
3253{
Ron Mercera4ab6132009-08-27 11:02:10 +00003254 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003255
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003256 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003257 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003258 /* Try to alloc space for the msix struct,
3259 * if it fails then go to MSI/legacy.
3260 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003261 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003262 sizeof(struct msix_entry),
3263 GFP_KERNEL);
3264 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003265 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003266 goto msi;
3267 }
3268
Ron Mercera4ab6132009-08-27 11:02:10 +00003269 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003270 qdev->msi_x_entry[i].entry = i;
3271
Ron Mercera4ab6132009-08-27 11:02:10 +00003272 /* Loop to get our vectors. We start with
3273 * what we want and settle for what we get.
3274 */
3275 do {
3276 err = pci_enable_msix(qdev->pdev,
3277 qdev->msi_x_entry, qdev->intr_count);
3278 if (err > 0)
3279 qdev->intr_count = err;
3280 } while (err > 0);
3281
3282 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003283 kfree(qdev->msi_x_entry);
3284 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003285 netif_warn(qdev, ifup, qdev->ndev,
3286 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003287 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003288 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003289 } else if (err == 0) {
3290 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003291 netif_info(qdev, ifup, qdev->ndev,
3292 "MSI-X Enabled, got %d vectors.\n",
3293 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003294 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003295 }
3296 }
3297msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003298 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003299 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003300 if (!pci_enable_msi(qdev->pdev)) {
3301 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003302 netif_info(qdev, ifup, qdev->ndev,
3303 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003304 return;
3305 }
3306 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003307 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003308 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3309 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003310}
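/* The shrink-and-retry loop above relies on the pci_enable_msix()
 * contract: 0 means success, a negative errno means hard failure, and
 * a positive return is the number of vectors actually available. For
 * example, a request for 8 vectors on a function exposing only 4 gets
 * 4 back on the first pass, so the loop retries and then succeeds with
 * intr_count trimmed to 4.
 */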
3311
Ron Mercer39aa8162009-08-27 11:02:11 +00003312/* Each vector services 1 RSS ring and 1 or more
3313 * TX completion rings. This function loops through
3314 * the TX completion rings and assigns the vector that
3315 * will service it. An example would be if there are
3316 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3317 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003318 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003319 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3320 */
3321static void ql_set_tx_vect(struct ql_adapter *qdev)
3322{
3323 int i, j, vect;
3324 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3325
3326 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327 /* Assign irq vectors to TX rx_rings.*/
3328 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3329 i < qdev->rx_ring_count; i++) {
3330 if (j == tx_rings_per_vector) {
3331 vect++;
3332 j = 0;
3333 }
3334 qdev->rx_ring[i].irq = vect;
3335 j++;
3336 }
3337 } else {
3338 /* For single vector all rings have an irq
3339 * of zero.
3340 */
3341 for (i = 0; i < qdev->rx_ring_count; i++)
3342 qdev->rx_ring[i].irq = 0;
3343 }
3344}
3345
3346/* Set the interrupt mask for this vector. Each vector
3347 * will service 1 RSS ring and 1 or more TX completion
3348 * rings. This function sets up a bit mask per vector
3349 * that indicates which rings it services.
3350 */
3351static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3352{
3353 int j, vect = ctx->intr;
3354 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3355
3356 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3357 /* Add the RSS ring serviced by this vector
3358 * to the mask.
3359 */
3360 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3361 /* Add the TX ring(s) serviced by this vector
3362 * to the mask. */
3363 for (j = 0; j < tx_rings_per_vector; j++) {
3364 ctx->irq_mask |=
3365 (1 << qdev->rx_ring[qdev->rss_ring_count +
3366 (vect * tx_rings_per_vector) + j].cq_id);
3367 }
3368 } else {
3369 /* For single vector we just shift each queue's
3370 * ID into the mask.
3371 */
3372 for (j = 0; j < qdev->rx_ring_count; j++)
3373 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3374 }
3375}
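/* A sketch (not built) of the mask math above, reusing the 2-vector,
 * 8-TX-ring example: tx_rings_per_vector = 4 and cq_id equals the ring
 * index, so vector 0 masks cq_ids 0,2,3,4,5 and vector 1 masks
 * cq_ids 1,6,7,8,9.
 */
#if 0
static u32 example_irq_mask(int vect)
{
	u32 mask = 1 << vect;	/* the RSS ring serviced by this vector */
	int j;

	for (j = 0; j < 4; j++)	/* its four TX completion rings */
		mask |= 1 << (2 + vect * 4 + j);
	return mask;		/* vect 0 -> 0x003d, vect 1 -> 0x03c2 */
}
#endif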
3376
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003377/*
3378 * Here we build the intr_context structures based on
3379 * our rx_ring count and intr vector count.
3380 * The intr_context structure is used to hook each vector
3381 * to possibly different handlers.
3382 */
3383static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3384{
3385 int i = 0;
3386 struct intr_context *intr_context = &qdev->intr_context[0];
3387
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003388 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3389		/* Each rx_ring has its
3390 * own intr_context since we have separate
3391 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003392 */
3393 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3394 qdev->rx_ring[i].irq = i;
3395 intr_context->intr = i;
3396 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003397 /* Set up this vector's bit-mask that indicates
3398 * which queues it services.
3399 */
3400 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003401 /*
3402			 * We set up each vector's enable/disable/read bits so
3403			 * there are no bit/mask calculations in the critical path.
3404 */
3405 intr_context->intr_en_mask =
3406 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3407 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3408 | i;
3409 intr_context->intr_dis_mask =
3410 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3411 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3412 INTR_EN_IHD | i;
3413 intr_context->intr_read_mask =
3414 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3415 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3416 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003417 if (i == 0) {
3418 /* The first vector/queue handles
3419 * broadcast/multicast, fatal errors,
3420				 * and firmware events. This is in addition
3421 * to normal inbound NAPI processing.
3422 */
3423 intr_context->handler = qlge_isr;
3424 sprintf(intr_context->name, "%s-rx-%d",
3425 qdev->ndev->name, i);
3426 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003427 /*
3428 * Inbound queues handle unicast frames only.
3429 */
3430 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003431 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003432 qdev->ndev->name, i);
3433 }
3434 }
3435 } else {
3436 /*
3437 * All rx_rings use the same intr_context since
3438 * there is only one vector.
3439 */
3440 intr_context->intr = 0;
3441 intr_context->qdev = qdev;
3442 /*
3443		 * We set up each vector's enable/disable/read bits so
3444		 * there are no bit/mask calculations in the critical path.
3445 */
3446 intr_context->intr_en_mask =
3447 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3448 intr_context->intr_dis_mask =
3449 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3450 INTR_EN_TYPE_DISABLE;
3451 intr_context->intr_read_mask =
3452 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3453 /*
3454 * Single interrupt means one handler for all rings.
3455 */
3456 intr_context->handler = qlge_isr;
3457 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003458 /* Set up this vector's bit-mask that indicates
3459 * which queues it services. In this case there is
3460 * a single vector so it will service all RSS and
3461 * TX completion rings.
3462 */
3463 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003464 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003465 /* Tell the TX completion rings which MSIx vector
3466 * they will be using.
3467 */
3468 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003469}
3470
3471static void ql_free_irq(struct ql_adapter *qdev)
3472{
3473 int i;
3474 struct intr_context *intr_context = &qdev->intr_context[0];
3475
3476 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3477 if (intr_context->hooked) {
3478 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3479 free_irq(qdev->msi_x_entry[i].vector,
3480 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003481 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3482 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003483 } else {
3484 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003485 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3486 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003487 }
3488 }
3489 }
3490 ql_disable_msix(qdev);
3491}
3492
3493static int ql_request_irq(struct ql_adapter *qdev)
3494{
3495 int i;
3496 int status = 0;
3497 struct pci_dev *pdev = qdev->pdev;
3498 struct intr_context *intr_context = &qdev->intr_context[0];
3499
3500 ql_resolve_queues_to_irqs(qdev);
3501
3502 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3503 atomic_set(&intr_context->irq_cnt, 0);
3504 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3505 status = request_irq(qdev->msi_x_entry[i].vector,
3506 intr_context->handler,
3507 0,
3508 intr_context->name,
3509 &qdev->rx_ring[i]);
3510 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003511 netif_err(qdev, ifup, qdev->ndev,
3512 "Failed request for MSIX interrupt %d.\n",
3513 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003514 goto err_irq;
3515 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003516 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517 "Hooked intr %d, queue type %s, with name %s.\n",
3518 i,
3519 qdev->rx_ring[i].type == DEFAULT_Q ?
3520 "DEFAULT_Q" :
3521 qdev->rx_ring[i].type == TX_Q ?
3522 "TX_Q" :
3523 qdev->rx_ring[i].type == RX_Q ?
3524 "RX_Q" : "",
3525 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003526 }
3527 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003528 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3529 "trying msi or legacy interrupts.\n");
3530 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3531 "%s: irq = %d.\n", __func__, pdev->irq);
3532 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3533 "%s: context->name = %s.\n", __func__,
3534 intr_context->name);
3535 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3536 "%s: dev_id = 0x%p.\n", __func__,
3537 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003538 status =
3539 request_irq(pdev->irq, qlge_isr,
3540 test_bit(QL_MSI_ENABLED,
3541 &qdev->
3542 flags) ? 0 : IRQF_SHARED,
3543 intr_context->name, &qdev->rx_ring[0]);
3544 if (status)
3545 goto err_irq;
3546
Joe Perchesae9540f72010-02-09 11:49:52 +00003547 netif_err(qdev, ifup, qdev->ndev,
3548 "Hooked intr %d, queue type %s, with name %s.\n",
3549 i,
3550 qdev->rx_ring[0].type == DEFAULT_Q ?
3551 "DEFAULT_Q" :
3552 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3553 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3554 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003555 }
3556 intr_context->hooked = 1;
3557 }
3558 return status;
3559err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003560	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003561 ql_free_irq(qdev);
3562 return status;
3563}
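/* Note that the dev_id cookie handed to request_irq() above is a
 * rx_ring pointer (&qdev->rx_ring[i] per MSI-X vector, rx_ring[0] for
 * MSI/legacy), so each handler receives its ring directly and
 * ql_free_irq() must hand the same pointer back to free_irq().
 */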
3564
3565static int ql_start_rss(struct ql_adapter *qdev)
3566{
Joe Perches215faf92010-12-21 02:16:10 -08003567 static const u8 init_hash_seed[] = {
3568 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3569 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3570 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3571 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3572 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3573 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003574 struct ricb *ricb = &qdev->ricb;
3575 int status = 0;
3576 int i;
3577 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3578
Ron Mercere3324712009-07-02 06:06:13 +00003579 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003580
Ron Mercerb2014ff2009-08-27 11:02:09 +00003581 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003582 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003583 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3584 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003585
3586 /*
3587 * Fill out the Indirection Table.
3588 */
Ron Mercer541ae282009-10-08 09:54:37 +00003589 for (i = 0; i < 1024; i++)
3590 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003591
Ron Mercer541ae282009-10-08 09:54:37 +00003592 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3593 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003594
Joe Perchesae9540f72010-02-09 11:49:52 +00003595 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003596
Ron Mercere3324712009-07-02 06:06:13 +00003597 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003598 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003599 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003600 return status;
3601 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003602 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3603 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003604 return status;
3605}
3606
Ron Mercera5f59dc2009-07-02 06:06:07 +00003607static int ql_clear_routing_entries(struct ql_adapter *qdev)
3608{
3609 int i, status = 0;
3610
3611 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3612 if (status)
3613 return status;
3614 /* Clear all the entries in the routing table. */
3615 for (i = 0; i < 16; i++) {
3616 status = ql_set_routing_reg(qdev, i, 0, 0);
3617 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003618 netif_err(qdev, ifup, qdev->ndev,
3619 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003620 break;
3621 }
3622 }
3623 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3624 return status;
3625}
3626
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003627/* Initialize the frame-to-queue routing. */
3628static int ql_route_initialize(struct ql_adapter *qdev)
3629{
3630 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003631
3632 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003633 status = ql_clear_routing_entries(qdev);
3634 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003635 return status;
3636
3637 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3638 if (status)
3639 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003640
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003641 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3642 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003643 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003644 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003645 "Failed to init routing register "
3646 "for IP CSUM error packets.\n");
3647 goto exit;
3648 }
3649 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3650 RT_IDX_TU_CSUM_ERR, 1);
3651 if (status) {
3652 netif_err(qdev, ifup, qdev->ndev,
3653 "Failed to init routing register "
3654 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003655 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003656 }
3657 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3658 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003659 netif_err(qdev, ifup, qdev->ndev,
3660 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003661 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003662 }
3663 /* If we have more than one inbound queue, then turn on RSS in the
3664 * routing block.
3665 */
3666 if (qdev->rss_ring_count > 1) {
3667 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3668 RT_IDX_RSS_MATCH, 1);
3669 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003670 netif_err(qdev, ifup, qdev->ndev,
3671 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003672 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003673 }
3674 }
3675
3676 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3677 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003678 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003679 netif_err(qdev, ifup, qdev->ndev,
3680 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003681exit:
3682 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003683 return status;
3684}
3685
Ron Mercer2ee1e272009-03-03 12:10:33 +00003686int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003687{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003688 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003689
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003690	/* Check if the link is up and use that to
3691	 * determine whether we are setting or clearing
3692 * the MAC address in the CAM.
3693 */
3694 set = ql_read32(qdev, STS);
3695 set &= qdev->port_link_up;
3696 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003697 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003698 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003699 return status;
3700 }
3701
3702 status = ql_route_initialize(qdev);
3703 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003704 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003705
3706 return status;
3707}
3708
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003709static int ql_adapter_initialize(struct ql_adapter *qdev)
3710{
3711 u32 value, mask;
3712 int i;
3713 int status = 0;
3714
3715 /*
3716 * Set up the System register to halt on errors.
3717 */
3718 value = SYS_EFE | SYS_FAE;
3719 mask = value << 16;
3720 ql_write32(qdev, SYS, mask | value);
3721
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003722	/* Set the default queue and VLAN behavior. */
3723 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3724 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003725 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3726
3727 /* Set the MPI interrupt to enabled. */
3728 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3729
3730 /* Enable the function, set pagesize, enable error checking. */
3731 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003732 FSC_EC | FSC_VM_PAGE_4K;
3733 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003734
3735 /* Set/clear header splitting. */
3736 mask = FSC_VM_PAGESIZE_MASK |
3737 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3738 ql_write32(qdev, FSC, mask | value);
3739
Ron Mercer572c5262010-01-02 10:37:42 +00003740 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003741
Ron Mercera3b71932009-10-08 09:54:38 +00003742 /* Set RX packet routing to use port/pci function on which the
3743 * packet arrived on in addition to usual frame routing.
3744 * This is helpful on bonding where both interfaces can have
3745 * the same MAC address.
3746 */
3747 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003748 /* Reroute all packets to our Interface.
3749 * They may have been routed to MPI firmware
3750 * due to WOL.
3751 */
3752 value = ql_read32(qdev, MGMT_RCV_CFG);
3753 value &= ~MGMT_RCV_CFG_RM;
3754 mask = 0xffff0000;
3755
3756 /* Sticky reg needs clearing due to WOL. */
3757 ql_write32(qdev, MGMT_RCV_CFG, mask);
3758 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3759
3760	/* Default WOL is enabled on Mezz cards */
3761 if (qdev->pdev->subsystem_device == 0x0068 ||
3762 qdev->pdev->subsystem_device == 0x0180)
3763 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003764
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003765 /* Start up the rx queues. */
3766 for (i = 0; i < qdev->rx_ring_count; i++) {
3767 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3768 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003769 netif_err(qdev, ifup, qdev->ndev,
3770 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003771 return status;
3772 }
3773 }
3774
3775 /* If there is more than one inbound completion queue
3776 * then download a RICB to configure RSS.
3777 */
3778 if (qdev->rss_ring_count > 1) {
3779 status = ql_start_rss(qdev);
3780 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003781 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003782 return status;
3783 }
3784 }
3785
3786 /* Start up the tx queues. */
3787 for (i = 0; i < qdev->tx_ring_count; i++) {
3788 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3789 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003790 netif_err(qdev, ifup, qdev->ndev,
3791 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003792 return status;
3793 }
3794 }
3795
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003796 /* Initialize the port and set the max framesize. */
3797 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003798 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003799 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003800
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003801 /* Set up the MAC address and frame routing filter. */
3802 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003803 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003804 netif_err(qdev, ifup, qdev->ndev,
3805 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003806 return status;
3807 }
3808
3809 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003810 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003811 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3812 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813 napi_enable(&qdev->rx_ring[i].napi);
3814 }
3815
3816 return status;
3817}
3818
3819/* Issue soft reset to chip. */
3820static int ql_adapter_reset(struct ql_adapter *qdev)
3821{
3822 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003823 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003824 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003825
Ron Mercera5f59dc2009-07-02 06:06:07 +00003826 /* Clear all the entries in the routing table. */
3827 status = ql_clear_routing_entries(qdev);
3828 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003829 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003830 return status;
3831 }
3832
3833 end_jiffies = jiffies +
3834 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003835
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003836	/* If the recovery bit is set, skip the mailbox command and
3837	 * clear it; otherwise we are in the normal reset process.
3838 */
3839 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3840 /* Stop management traffic. */
3841 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003842
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003843 /* Wait for the NIC and MGMNT FIFOs to empty. */
3844 ql_wait_fifo_empty(qdev);
3845 } else
3846 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003847
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003848 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003849
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003850 do {
3851 value = ql_read32(qdev, RST_FO);
3852 if ((value & RST_FO_FR) == 0)
3853 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003854 cpu_relax();
3855 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003856
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003857 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003858 netif_err(qdev, ifdown, qdev->ndev,
3859 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003860 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003861 }
3862
Ron Mercer84087f42009-10-08 09:54:41 +00003863 /* Resume management traffic. */
3864 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003865 return status;
3866}
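/* The soft reset handshake above: write RST_FO_FR together with its
 * mask bit in the upper 16 bits, then poll RST_FO until the chip
 * clears the bit, budgeting usecs_to_jiffies(30) (clamped to at least
 * one jiffy) before declaring -ETIMEDOUT.
 */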
3867
3868static void ql_display_dev_info(struct net_device *ndev)
3869{
Joe Perchesb16fed02010-11-15 11:12:28 +00003870 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003871
Joe Perchesae9540f72010-02-09 11:49:52 +00003872 netif_info(qdev, probe, qdev->ndev,
3873 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3874 "XG Roll = %d, XG Rev = %d.\n",
3875 qdev->func,
3876 qdev->port,
3877 qdev->chip_rev_id & 0x0000000f,
3878 qdev->chip_rev_id >> 4 & 0x0000000f,
3879 qdev->chip_rev_id >> 8 & 0x0000000f,
3880 qdev->chip_rev_id >> 12 & 0x0000000f);
3881 netif_info(qdev, probe, qdev->ndev,
3882 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003883}
3884
stephen hemmingerac409212010-10-21 07:50:54 +00003885static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003886{
3887 int status = 0;
3888 u32 wol = MB_WOL_DISABLE;
3889
3890 /* The CAM is still intact after a reset, but if we
3891 * are doing WOL, then we may need to program the
3892 * routing regs. We would also need to issue the mailbox
3893 * commands to instruct the MPI what to do per the ethtool
3894 * settings.
3895 */
3896
3897 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3898 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003899 netif_err(qdev, ifdown, qdev->ndev,
3900 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3901 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003902 return -EINVAL;
3903 }
3904
3905 if (qdev->wol & WAKE_MAGIC) {
3906 status = ql_mb_wol_set_magic(qdev, 1);
3907 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003908 netif_err(qdev, ifdown, qdev->ndev,
3909 "Failed to set magic packet on %s.\n",
3910 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003911 return status;
3912 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003913 netif_info(qdev, drv, qdev->ndev,
3914 "Enabled magic packet successfully on %s.\n",
3915 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003916
3917 wol |= MB_WOL_MAGIC_PKT;
3918 }
3919
3920 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003921 wol |= MB_WOL_MODE_ON;
3922 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003923 netif_err(qdev, drv, qdev->ndev,
3924 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003925 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003926 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003927 }
3928
3929 return status;
3930}
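/* Of the ethtool wake options only WAKE_MAGIC is honored here: any of
 * ARP/MAGICSECURE/PHY/UCAST/MCAST/BCAST in qdev->wol fails the call
 * with -EINVAL, while magic-packet wake is armed through the two MPI
 * mailbox commands above.
 */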
3931
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003932static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003933{
Ron Mercer6497b602009-02-12 16:37:13 -08003935 /* Don't kill the reset worker thread if we
3936 * are in the process of recovery.
3937 */
3938 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3939 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003940 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3941 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003942 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003943 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003944 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003945}
3946
3947static int ql_adapter_down(struct ql_adapter *qdev)
3948{
3949 int i, status = 0;
3950
3951 ql_link_off(qdev);
3952
3953 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954
Ron Mercer39aa8162009-08-27 11:02:11 +00003955 for (i = 0; i < qdev->rss_ring_count; i++)
3956 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003957
3958 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3959
3960 ql_disable_interrupts(qdev);
3961
3962 ql_tx_ring_clean(qdev);
3963
Ron Mercer6b318cb2009-03-09 10:59:26 +00003964 /* Call netif_napi_del() from common point.
3965 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003966 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003967 netif_napi_del(&qdev->rx_ring[i].napi);
3968
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003969 status = ql_adapter_reset(qdev);
3970 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003971 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3972 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003973 ql_free_rx_buffers(qdev);
3974
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003975 return status;
3976}
3977
3978static int ql_adapter_up(struct ql_adapter *qdev)
3979{
3980 int err = 0;
3981
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003982 err = ql_adapter_initialize(qdev);
3983 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003984 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003985 goto err_init;
3986 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003987 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003988 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003989 /* If the port is initialized and the
3990	 * link is up then turn on the carrier.
3991 */
3992 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3993 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003994 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003995 /* Restore rx mode. */
3996 clear_bit(QL_ALLMULTI, &qdev->flags);
3997 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3998 qlge_set_multicast_list(qdev->ndev);
3999
Ron Mercerc1b60092010-10-27 04:58:12 +00004000 /* Restore vlan setting. */
4001 qlge_restore_vlan(qdev);
4002
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004003 ql_enable_interrupts(qdev);
4004 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004005 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004006
4007 return 0;
4008err_init:
4009 ql_adapter_reset(qdev);
4010 return err;
4011}
4012
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004013static void ql_release_adapter_resources(struct ql_adapter *qdev)
4014{
4015 ql_free_mem_resources(qdev);
4016 ql_free_irq(qdev);
4017}
4018
4019static int ql_get_adapter_resources(struct ql_adapter *qdev)
4020{
4021 int status = 0;
4022
4023 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004024 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004025 return -ENOMEM;
4026 }
4027 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004028 return status;
4029}
4030
4031static int qlge_close(struct net_device *ndev)
4032{
4033 struct ql_adapter *qdev = netdev_priv(ndev);
4034
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004035	/* If we hit the pci_channel_io_perm_failure
4036	 * condition, then we already
4037 * brought the adapter down.
4038 */
4039 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004040 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004041 clear_bit(QL_EEH_FATAL, &qdev->flags);
4042 return 0;
4043 }
4044
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004045 /*
4046 * Wait for device to recover from a reset.
4047 * (Rarely happens, but possible.)
4048 */
4049 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4050 msleep(1);
4051 ql_adapter_down(qdev);
4052 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004053 return 0;
4054}
4055
4056static int ql_configure_rings(struct ql_adapter *qdev)
4057{
4058 int i;
4059 struct rx_ring *rx_ring;
4060 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004061 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004062 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4063 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4064
4065 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004066
Ron Mercera4ab6132009-08-27 11:02:10 +00004067 /* In a perfect world we have one RSS ring for each CPU
4068	 * and each has its own vector. To do that we ask for
4069 * cpu_cnt vectors. ql_enable_msix() will adjust the
4070 * vector count to what we actually get. We then
4071 * allocate an RSS ring for each.
4072 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004073 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004074 qdev->intr_count = cpu_cnt;
4075 ql_enable_msix(qdev);
4076 /* Adjust the RSS ring count to the actual vector count. */
4077 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004078 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004079 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004080
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004081 for (i = 0; i < qdev->tx_ring_count; i++) {
4082 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004083 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004084 tx_ring->qdev = qdev;
4085 tx_ring->wq_id = i;
4086 tx_ring->wq_len = qdev->tx_ring_size;
4087 tx_ring->wq_size =
4088 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4089
4090 /*
4091		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004092 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004093 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004094 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004095 }
4096
4097 for (i = 0; i < qdev->rx_ring_count; i++) {
4098 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004099 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004100 rx_ring->qdev = qdev;
4101 rx_ring->cq_id = i;
4102 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004103 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004104 /*
4105 * Inbound (RSS) queues.
4106 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004107 rx_ring->cq_len = qdev->rx_ring_size;
4108 rx_ring->cq_size =
4109 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4110 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4111 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004112 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004113 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004114 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4115 "lbq_buf_size %d, order = %d\n",
4116 rx_ring->lbq_buf_size,
4117 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004118 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4119 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004120 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004121 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004122 rx_ring->type = RX_Q;
4123 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004124 /*
4125 * Outbound queue handles outbound completions only.
4126 */
4127 /* outbound cq is same size as tx_ring it services. */
4128 rx_ring->cq_len = qdev->tx_ring_size;
4129 rx_ring->cq_size =
4130 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4131 rx_ring->lbq_len = 0;
4132 rx_ring->lbq_size = 0;
4133 rx_ring->lbq_buf_size = 0;
4134 rx_ring->sbq_len = 0;
4135 rx_ring->sbq_size = 0;
4136 rx_ring->sbq_buf_size = 0;
4137 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004138 }
4139 }
4140 return 0;
4141}
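/* Worked example for the sizing above, assuming 4 online CPUs and a
 * full 4-vector MSI-X grant: intr_count = rss_ring_count =
 * tx_ring_count = 4 and rx_ring_count = 8. rx_ring[0..3] are inbound
 * RSS queues (cq_id 0-3) with lbq/sbq buffers; rx_ring[4..7] are
 * outbound-completion-only queues (cq_id 4-7), one per tx_ring, since
 * each tx_ring->cq_id = rss_ring_count + i.
 */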
4142
4143static int qlge_open(struct net_device *ndev)
4144{
4145 int err = 0;
4146 struct ql_adapter *qdev = netdev_priv(ndev);
4147
Ron Mercer74e12432009-11-11 12:54:04 +00004148 err = ql_adapter_reset(qdev);
4149 if (err)
4150 return err;
4151
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004152 err = ql_configure_rings(qdev);
4153 if (err)
4154 return err;
4155
4156 err = ql_get_adapter_resources(qdev);
4157 if (err)
4158 goto error_up;
4159
4160 err = ql_adapter_up(qdev);
4161 if (err)
4162 goto error_up;
4163
4164 return err;
4165
4166error_up:
4167 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004168 return err;
4169}
4170
Ron Mercer7c734352009-10-19 03:32:19 +00004171static int ql_change_rx_buffers(struct ql_adapter *qdev)
4172{
4173 struct rx_ring *rx_ring;
4174 int i, status;
4175 u32 lbq_buf_len;
4176
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004177 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004178 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4179 int i = 3;
4180 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004181 netif_err(qdev, ifup, qdev->ndev,
4182 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004183 ssleep(1);
4184 }
4185
4186 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004187 netif_err(qdev, ifup, qdev->ndev,
4188 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004189 return -ETIMEDOUT;
4190 }
4191 }
4192
4193 status = ql_adapter_down(qdev);
4194 if (status)
4195 goto error;
4196
4197 /* Get the new rx buffer size. */
4198 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4199 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4200 qdev->lbq_buf_order = get_order(lbq_buf_len);
4201
4202 for (i = 0; i < qdev->rss_ring_count; i++) {
4203 rx_ring = &qdev->rx_ring[i];
4204 /* Set the new size. */
4205 rx_ring->lbq_buf_size = lbq_buf_len;
4206 }
4207
4208 status = ql_adapter_up(qdev);
4209 if (status)
4210 goto error;
4211
4212 return status;
4213error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004214 netif_alert(qdev, ifup, qdev->ndev,
4215 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004216 set_bit(QL_ADAPTER_UP, &qdev->flags);
4217 dev_close(qdev->ndev);
4218 return status;
4219}
4220
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");

	return status;
}

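/* Aggregate the per-ring counters into the netdev stats block.
 * RX counters are kept per RSS completion ring, TX counters per
 * TX ring.
 */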
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

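/* Program the routing and multicast MAC address registers to
 * match the promiscuous/all-multi flags and the multicast list
 * of the net_device.
 */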
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

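/* Load a new station address into the CAM.  A local copy is kept
 * so the address can be re-programmed after a chip reset.
 */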
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

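/* Worker for ASIC error recovery: bounce the adapter, restore the
 * RX mode, and close the device for good if the cycle fails.
 */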
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

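/* Chip-specific flash and port bring-up hooks.  The proper table
 * is selected in ql_get_board_info() from the PCI device ID.
 */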
static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

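/* Derive the per-function operating parameters (port number,
 * semaphore masks, mailbox offsets and nic_ops) from the chip's
 * status register and PCI device ID.
 */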
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

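/* Undo everything ql_init_device() set up: the workqueue, the
 * register mappings, the coredump buffer and the PCI regions.
 */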
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

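/* One-time PCI and software setup done at probe time, before the
 * net_device is registered with the stack.
 */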
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

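/* Periodic five second timer.  The read of the status register
 * generates bus traffic so that a dead PCI channel is detected
 * (and EEH triggered) even while the interface is idle.
 */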
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

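/* Per-device probe entry point: allocate a multiqueue net_device
 * sized to the online CPUs, initialize the adapter and register
 * the interface.
 */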
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

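/* Wrappers around the normal send/clean paths, exported for the
 * ethtool loopback self-test code.
 */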
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

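/* Tear a device down on driver unload or hot-unplug. */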
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

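/* Final EEH recovery stage: traffic may flow again, so reopen the
 * device if it was running and re-arm the EEH watchdog timer.
 */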
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

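/* Quiesce the device, arm wake-on-LAN and enter the requested
 * low-power state.  Built unconditionally because qlge_shutdown()
 * uses it even when CONFIG_PM is not set.
 */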
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

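/* Reuse the suspend path to quiesce the chip on shutdown/reboot. */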
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler,
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);