/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

45#include "qlge.h"
46
47char qlge_driver_name[] = DRV_NAME;
48const char qlge_driver_version[] = DRV_VERSION;
49
50MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51MODULE_DESCRIPTION(DRV_STRING " ");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(DRV_VERSION);
54
55static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57/* NETIF_MSG_TIMER | */
58 NETIF_MSG_IFDOWN |
59 NETIF_MSG_IFUP |
60 NETIF_MSG_RX_ERR |
61 NETIF_MSG_TX_ERR |
Ron Mercer49740972009-02-26 10:08:36 +000062/* NETIF_MSG_TX_QUEUED | */
63/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
Ron Mercerc4e84bd2008-09-18 11:56:28 -040064/* NETIF_MSG_PKTDATA | */
65 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
Sonny Rao84cf7022010-11-18 11:50:02 +000067static int debug = -1; /* defaults above */
68module_param(debug, int, 0664);
Ron Mercerc4e84bd2008-09-18 11:56:28 -040069MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71#define MSIX_IRQ 0
72#define MSI_IRQ 1
73#define LEG_IRQ 2
Ron Mercera5a62a12009-11-11 12:54:05 +000074static int qlge_irq_type = MSIX_IRQ;
Sonny Rao84cf7022010-11-18 11:50:02 +000075module_param(qlge_irq_type, int, 0664);
Ron Mercera5a62a12009-11-11 12:54:05 +000076MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
Ron Mercerc4e84bd2008-09-18 11:56:28 -040077
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
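
/*
 * Typical locking pattern for the semaphore helpers above (a sketch of
 * how callers later in this file use them):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...touch the shared resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times with a
 * 100 usec delay between attempts, so a contended semaphore gets
 * roughly 3 msec before the caller sees -ETIMEDOUT.
 */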

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used by kernel-thread callers such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
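
/*
 * The polling loop above (and ql_wait_cfg() below) busy-waits for at
 * most UDELAY_COUNT iterations of UDELAY_DELAY microseconds each (both
 * constants come from qlge.h) before giving up with -ETIMEDOUT.
 */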

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the DMA mapping if the semaphore times out. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
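
/*
 * The DMA direction chosen above follows the transfer: the load
 * operations (CFG_LRQ, CFG_LR, CFG_LCQ) push a control block from host
 * memory to the chip (PCI_DMA_TODEVICE); every other CFG operation
 * pulls one back (PCI_DMA_FROMDEVICE).
 */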

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
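
/*
 * The sequence above is the chip's generic indirect-register protocol:
 * wait for MAC_ADDR_MW (register writable), write the offset/index/type
 * word with MAC_ADDR_RS set to request a read, wait for MAC_ADDR_MR
 * (data ready), then pull the word from MAC_ADDR_DATA.  A CAM entry is
 * two words of MAC address plus a third word of routing output, hence
 * the extra iteration for MAC_ADDR_TYPE_CAM_MAC.
 */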

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
				    (addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
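
/*
 * Note on the CAM slot math above: each PCI function owns its own block
 * of CAM entries, so the unicast address lands at index
 * qdev->func * MAX_CQ.  Writing the all-zero address to the same slot
 * un-matches the MAC, which is what keeps an inactive bonding slave
 * from claiming frames once the bond has moved the address elsewhere.
 */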

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
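
/*
 * Sketch of a typical caller (the real call sites appear later in this
 * file): enabling the broadcast slot routes broadcast frames to the
 * default completion queue.
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 */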

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
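
/*
 * The irq_cnt bookkeeping in the helpers above is symmetric: every
 * disable does an atomic_inc(), every enable an atomic_dec_and_test(),
 * and the line is actually re-armed only when the count returns to
 * zero.  That is why the loop above "precharges" the counter to 1
 * before calling the enable path.
 */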

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
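
/*
 * The flash image is expected to carry a compensating checksum word:
 * summing all 'size' little-endian 16-bit words of a valid image wraps
 * to exactly zero, so any nonzero remainder above is both logged and
 * returned as a failure to the flash-param readers below.
 */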

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
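
/*
 * Worked example (assuming 4 KiB pages): with qdev->lbq_buf_order == 1
 * the "master" block above is PAGE_SIZE << 1 = 8 KiB, which the large
 * buffer queue then carves into lbq_buf_size chunks, e.g. four 2 KiB
 * receive buffers per page allocation.
 */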

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
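
/*
 * Reference counting for the chunking above: every chunk handed out
 * except the last takes an extra get_page(), so the master page's
 * refcount always matches the number of chunks still outstanding.  The
 * final chunk sets last_flag instead, which is what triggers the
 * deferred pci_unmap_page() in ql_get_curr_lchunk().
 */
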
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

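/*
 * Both refill routines above restock in batches of 16: the producer
 * index advances 16 descriptors at a time, and the doorbell register is
 * written once at the end, and only if something was actually restocked,
 * which keeps doorbell writes off the per-packet fast path.
 */
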
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
				       PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	napi_gro_frags(napi);
}
1509
1510/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001511static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1514 u32 length,
1515 u16 vlan_id)
1516{
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1519 void *addr;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1522
1523 skb = netdev_alloc_skb(ndev, length);
1524 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001525 netif_err(qdev, drv, qdev->ndev,
1526 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1529 return;
1530 }
1531
1532 addr = lbq_desc->p.pg_chunk.va;
1533 prefetch(addr);
 1534
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001538 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001540 rx_ring->rx_errors++;
1541 goto err_out;
1542 }
1543
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1546 */
 1547 if (length > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001548 netif_err(qdev, drv, qdev->ndev,
 1549 "Segment too large, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001550 rx_ring->rx_dropped++;
1551 goto err_out;
1552 }
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559 length-ETH_HLEN);
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1563
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001567 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001568
Michał Mirosław88230fd2011-04-18 13:31:21 +00001569 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571 /* TCP frame. */
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data;
1580 if (!(iph->frag_off &
 1581 htons(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001583 netif_printk(qdev, rx_status, KERN_DEBUG,
1584 qdev->ndev,
1585 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001586 }
1587 }
1588 }
1589
1590 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001591 if (vlan_id != 0xffff)
1592 __vlan_hwaccel_put_tag(skb, vlan_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 napi_gro_receive(napi, skb);
1595 else
1596 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001597 return;
1598err_out:
1599 dev_kfree_skb_any(skb);
1600 put_page(lbq_desc->p.pg_chunk.page);
1601}
1602
1603/* Process an inbound completion from an rx ring. */
1604static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 struct rx_ring *rx_ring,
1606 struct ib_mac_iocb_rsp *ib_mac_rsp,
1607 u32 length,
1608 u16 vlan_id)
1609{
1610 struct net_device *ndev = qdev->ndev;
1611 struct sk_buff *skb = NULL;
1612 struct sk_buff *new_skb = NULL;
1613 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615 skb = sbq_desc->p.skb;
1616 /* Allocate new_skb and copy */
1617 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001619 netif_err(qdev, probe, qdev->ndev,
1620 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001621 rx_ring->rx_dropped++;
1622 return;
1623 }
1624 skb_reserve(new_skb, NET_IP_ALIGN);
1625 memcpy(skb_put(new_skb, length), skb->data, length);
1626 skb = new_skb;
1627
1628 /* Frame error, so drop the packet. */
1629 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001630 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001631 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_errors++;
1634 return;
1635 }
1636
1637 /* loopback self test for ethtool */
1638 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639 ql_check_lb_frame(qdev, skb);
1640 dev_kfree_skb_any(skb);
1641 return;
1642 }
1643
1644 /* The max framesize filter on this chip is set higher than
1645 * MTU since FCoE uses 2k frames.
1646 */
1647 if (skb->len > ndev->mtu + ETH_HLEN) {
1648 dev_kfree_skb_any(skb);
1649 rx_ring->rx_dropped++;
1650 return;
1651 }
1652
1653 prefetch(skb->data);
1654 skb->dev = ndev;
1655 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001656 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657 "%s Multicast.\n",
1658 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001664 }
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001668
1669 rx_ring->rx_packets++;
1670 rx_ring->rx_bytes += skb->len;
1671 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001672 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001673
1674 /* If rx checksum is on, and there are no
1675 * csum or frame errors.
1676 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001677 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001678 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679 /* TCP frame. */
1680 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001681 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001683 skb->ip_summed = CHECKSUM_UNNECESSARY;
1684 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686 /* Unfragmented ipv4 UDP frame. */
1687 struct iphdr *iph = (struct iphdr *) skb->data;
1688 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001689 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001690 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001691 netif_printk(qdev, rx_status, KERN_DEBUG,
1692 qdev->ndev,
1693 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001694 }
1695 }
1696 }
1697
1698 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001699 if (vlan_id != 0xffff)
1700 __vlan_hwaccel_put_tag(skb, vlan_id);
1701 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 napi_gro_receive(&rx_ring->napi, skb);
1703 else
1704 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001705}
1706
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001707static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001708{
1709 void *temp_addr = skb->data;
1710
1711 /* Undo the skb_reserve(skb,32) we did before
1712 * giving to hardware, and realign data on
1713 * a 2-byte boundary.
1714 */
1715 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717 skb_copy_to_linear_data(skb, temp_addr,
1718 (unsigned int)len);
1719}
1720
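/* Worked example for ql_realign_skb(), assuming QLGE_SB_PAD == 32 (the
 * skb_reserve(skb, 32) mentioned above) and NET_IP_ALIGN == 2: DMA left
 * the frame at head + 32; moving data and tail back by 32 - 2 = 30
 * bytes puts it at head + 2, so after the 14-byte Ethernet header the
 * IP header starts at head + 16, a 4-byte-aligned offset.
 */
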
1721/*
1722 * This function builds an skb for the given inbound
1723 * completion. It will be rewritten for readability in the near
 1724 * future, but for now it works well.
1725 */
1726static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727 struct rx_ring *rx_ring,
1728 struct ib_mac_iocb_rsp *ib_mac_rsp)
1729{
1730 struct bq_desc *lbq_desc;
1731 struct bq_desc *sbq_desc;
1732 struct sk_buff *skb = NULL;
1733 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736 /*
1737 * Handle the header buffer if present.
1738 */
1739 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001741 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001743 /*
1744 * Headers fit nicely into a small buffer.
1745 */
1746 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001748 dma_unmap_addr(sbq_desc, mapaddr),
1749 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001750 PCI_DMA_FROMDEVICE);
1751 skb = sbq_desc->p.skb;
1752 ql_realign_skb(skb, hdr_len);
1753 skb_put(skb, hdr_len);
1754 sbq_desc->p.skb = NULL;
1755 }
1756
1757 /*
1758 * Handle the data buffer(s).
1759 */
1760 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001761 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001763 return skb;
1764 }
1765
1766 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "Headers in small, data of %d bytes in small, combine them.\n",
1770 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001771 /*
1772 * Data is less than small buffer size so it's
1773 * stuffed in a small buffer.
1774 * For this case we append the data
1775 * from the "data" small buffer to the "header" small
1776 * buffer.
1777 */
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001780 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001781 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001782 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001783 (sbq_desc, maplen),
1784 PCI_DMA_FROMDEVICE);
1785 memcpy(skb_put(skb, length),
1786 sbq_desc->p.skb->data, length);
1787 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001788 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001789 (sbq_desc,
1790 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001791 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001792 (sbq_desc,
1793 maplen),
1794 PCI_DMA_FROMDEVICE);
1795 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "%d bytes in a single small buffer.\n",
1798 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001799 sbq_desc = ql_get_curr_sbuf(rx_ring);
1800 skb = sbq_desc->p.skb;
1801 ql_realign_skb(skb, length);
1802 skb_put(skb, length);
1803 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001804 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001806 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001807 maplen),
1808 PCI_DMA_FROMDEVICE);
1809 sbq_desc->p.skb = NULL;
1810 }
1811 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001813 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 "Header in small, %d bytes in large. Chain large to small!\n",
1815 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 /*
1817 * The data is in a single large buffer. We
1818 * chain it to the header buffer's skb and let
1819 * it rip.
1820 */
Ron Mercer7c734352009-10-19 03:32:19 +00001821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001822 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823 "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001825 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826 lbq_desc->p.pg_chunk.offset,
1827 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001828 skb->len += length;
1829 skb->data_len += length;
1830 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001831 } else {
1832 /*
1833 * The headers and data are in a single large buffer. We
1834 * copy it to a new skb and let it go. This can happen with
1835 * jumbo mtu on a non-TCP/UDP frame.
1836 */
Ron Mercer7c734352009-10-19 03:32:19 +00001837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001838 skb = netdev_alloc_skb(qdev->ndev, length);
1839 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001840 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001842 return NULL;
1843 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001844 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001845 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001846 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001847 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001848 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001849 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001850 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001853 skb_fill_page_desc(skb, 0,
1854 lbq_desc->p.pg_chunk.page,
1855 lbq_desc->p.pg_chunk.offset,
1856 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001857 skb->len += length;
1858 skb->data_len += length;
1859 skb->truesize += length;
1860 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001861 __pskb_pull_tail(skb,
1862 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863 VLAN_ETH_HLEN : ETH_HLEN);
1864 }
1865 } else {
1866 /*
1867 * The data is in a chain of large buffers
1868 * pointed to by a small buffer. We loop
 1869 * through and chain them to our small header
1870 * buffer's skb.
1871 * frags: There are 18 max frags and our small
1872 * buffer will hold 32 of them. The thing is,
1873 * we'll use 3 max for our 9000 byte jumbo
1874 * frames. If the MTU goes up we could
1875 * eventually be in trouble.
1876 */
Ron Mercer7c734352009-10-19 03:32:19 +00001877 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001878 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001880 dma_unmap_addr(sbq_desc, mapaddr),
1881 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001882 PCI_DMA_FROMDEVICE);
1883 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884 /*
 1885 * This is a non-TCP/UDP IP frame, so
1886 * the headers aren't split into a small
1887 * buffer. We have to use the small buffer
1888 * that contains our sg list as our skb to
1889 * send upstairs. Copy the sg list here to
1890 * a local buffer and use it to find the
1891 * pages to chain.
1892 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001893 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894 "%d bytes of headers & data in chain of large.\n",
1895 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001897 sbq_desc->p.skb = NULL;
1898 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001899 }
1900 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001901 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902 size = (length < rx_ring->lbq_buf_size) ? length :
1903 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001904
Joe Perchesae9540f72010-02-09 11:49:52 +00001905 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 "Adding page %d to skb for %d bytes.\n",
1907 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001908 skb_fill_page_desc(skb, i,
1909 lbq_desc->p.pg_chunk.page,
1910 lbq_desc->p.pg_chunk.offset,
1911 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001912 skb->len += size;
1913 skb->data_len += size;
1914 skb->truesize += size;
1915 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001916 i++;
1917 }
1918 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919 VLAN_ETH_HLEN : ETH_HLEN);
1920 }
1921 return skb;
1922}
1923
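/* Summary of the buffer layouts handled by ql_build_rx_skb() above:
 *
 *   HV+HS, length == 0  : headers alone in a small buffer
 *   HS + DS             : headers and data in separate small buffers;
 *                         the data is copied in after the headers
 *   DS only             : whole frame in a single small buffer
 *   HS + DL             : headers in a small buffer, data chained in
 *                         from one large page chunk
 *   DL only             : frame in one page chunk, attached to a new
 *                         skb with the headers pulled into the linear area
 *   neither DS nor DL   : frame spread over a chain of page chunks whose
 *                         sg list arrives in a small buffer
 */
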
1924/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001925static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001926 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001927 struct ib_mac_iocb_rsp *ib_mac_rsp,
1928 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929{
1930 struct net_device *ndev = qdev->ndev;
1931 struct sk_buff *skb = NULL;
1932
1933 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001939 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001940 return;
1941 }
1942
Ron Mercera32959c2009-06-09 05:39:27 +00001943 /* Frame error, so drop the packet. */
1944 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001945 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001946 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001947 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001948 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001949 return;
1950 }
Ron Mercerec33a492009-06-09 05:39:28 +00001951
1952 /* The max framesize filter on this chip is set higher than
1953 * MTU since FCoE uses 2k frames.
1954 */
1955 if (skb->len > ndev->mtu + ETH_HLEN) {
1956 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001957 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001958 return;
1959 }
1960
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001961 /* loopback self test for ethtool */
1962 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963 ql_check_lb_frame(qdev, skb);
1964 dev_kfree_skb_any(skb);
1965 return;
1966 }
1967
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001968 prefetch(skb->data);
1969 skb->dev = ndev;
1970 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001971 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001978 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001979 }
1980 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001981 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001983 }
Ron Mercerd555f592009-03-09 10:59:19 +00001984
Ron Mercerd555f592009-03-09 10:59:19 +00001985 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001986 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001987
1988 /* If rx checksum is on, and there are no
1989 * csum or frame errors.
1990 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001991 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001992 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1993 /* TCP frame. */
1994 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001995 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001997 skb->ip_summed = CHECKSUM_UNNECESSARY;
1998 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000 /* Unfragmented ipv4 UDP frame. */
2001 struct iphdr *iph = (struct iphdr *) skb->data;
2002 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002003 htons(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002004 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002005 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002007 }
2008 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002009 }
Ron Mercerd555f592009-03-09 10:59:19 +00002010
Ron Mercer885ee392009-11-03 13:49:31 +00002011 rx_ring->rx_packets++;
2012 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002013 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002014 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015 __vlan_hwaccel_put_tag(skb, vlan_id);
2016 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017 napi_gro_receive(&rx_ring->napi, skb);
2018 else
2019 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002020}
2021
Ron Mercer4f848c02010-01-02 10:37:43 +00002022/* Process an inbound completion from an rx ring. */
2023static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024 struct rx_ring *rx_ring,
2025 struct ib_mac_iocb_rsp *ib_mac_rsp)
2026{
2027 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2031
2032 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2033
2034 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035 /* The data and headers are split into
2036 * separate buffers.
2037 */
2038 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2039 vlan_id);
2040 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041 /* The data fit in a single small buffer.
2042 * Allocate a new skb, copy the data and
2043 * return the buffer to the free pool.
2044 */
2045 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2046 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002047 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050 /* TCP packet in a page chunk that's been checksummed.
2051 * Tack it on to our GRO skb and let it go.
2052 */
2053 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2054 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002055 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056 /* Non-TCP packet in a page chunk. Allocate an
2057 * skb, tack it on frags, and send it up.
2058 */
2059 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2060 length, vlan_id);
2061 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002062 /* Non-TCP/UDP large frames that span multiple buffers
 2063 * can be processed correctly by the split frame logic.
2064 */
2065 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002067 }
2068
2069 return (unsigned long)length;
2070}
2071
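/* Dispatch summary for ql_process_mac_rx_intr() above:
 *
 *   flags4 & HV                      -> ql_process_mac_split_rx_intr()
 *   flags3 & DS                      -> ql_process_mac_rx_skb()
 *   flags3 & DL, clean TCP checksum  -> ql_process_mac_rx_gro_page()
 *   flags3 & DL                      -> ql_process_mac_rx_page()
 *   anything else                    -> ql_process_mac_split_rx_intr()
 *
 * vlan_id is 0xffff when the completion carried no VLAN tag; the
 * handlers use that sentinel to skip __vlan_hwaccel_put_tag().
 */
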
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002072/* Process an outbound completion from an rx ring. */
2073static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 struct ob_mac_iocb_rsp *mac_rsp)
2075{
2076 struct tx_ring *tx_ring;
2077 struct tx_ring_desc *tx_ring_desc;
2078
2079 QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002083 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002085 dev_kfree_skb(tx_ring_desc->skb);
2086 tx_ring_desc->skb = NULL;
2087
2088 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089 OB_MAC_IOCB_RSP_S |
2090 OB_MAC_IOCB_RSP_L |
2091 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002093 netif_warn(qdev, tx_done, qdev->ndev,
2094 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002095 }
2096 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002097 netif_warn(qdev, tx_done, qdev->ndev,
2098 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002099 }
2100 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002101 netif_warn(qdev, tx_done, qdev->ndev,
2102 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002103 }
2104 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002105 netif_warn(qdev, tx_done, qdev->ndev,
2106 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002107 }
2108 }
2109 atomic_inc(&tx_ring->tx_count);
2110}
2111
2112/* Fire up a handler to reset the MPI processor. */
2113void ql_queue_fw_error(struct ql_adapter *qdev)
2114{
Ron Mercer6a473302009-07-02 06:06:12 +00002115 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002116 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117}
2118
2119void ql_queue_asic_error(struct ql_adapter *qdev)
2120{
Ron Mercer6a473302009-07-02 06:06:12 +00002121 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002123 /* Clear adapter up bit to signal the recovery
2124 * process that it shouldn't kill the reset worker
2125 * thread
2126 */
2127 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002128 /* Set the asic recovery bit to indicate to the reset process
 2129 * that we are in fatal error recovery rather than a normal close
2130 */
2131 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002132 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2133}
2134
2135static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136 struct ib_ae_iocb_rsp *ib_ae_rsp)
2137{
2138 switch (ib_ae_rsp->event) {
2139 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002140 netif_err(qdev, rx_err, qdev->ndev,
2141 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002142 ql_queue_fw_error(qdev);
2143 return;
2144
2145 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002146 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002148 ql_queue_asic_error(qdev);
2149 return;
2150
2151 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002152 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002153 ql_queue_asic_error(qdev);
2154 break;
2155
2156 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002157 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158 "anonymous buffers from rx_ring %d.\n",
2159 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002160 ql_queue_asic_error(qdev);
2161 break;
2162
2163 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002164 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2165 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 ql_queue_asic_error(qdev);
2167 break;
2168 }
2169}
2170
2171static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2172{
2173 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002174 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002175 struct ob_mac_iocb_rsp *net_rsp = NULL;
2176 int count = 0;
2177
Ron Mercer1e213302009-03-09 10:59:21 +00002178 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002179 /* While there are entries in the completion queue. */
2180 while (prod != rx_ring->cnsmr_idx) {
2181
Joe Perchesae9540f72010-02-09 11:49:52 +00002182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2184 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002185
2186 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2187 rmb();
2188 switch (net_rsp->opcode) {
2189
2190 case OPCODE_OB_MAC_TSO_IOCB:
2191 case OPCODE_OB_MAC_IOCB:
2192 ql_process_mac_tx_intr(qdev, net_rsp);
2193 break;
2194 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002195 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002198 }
2199 count++;
2200 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002201 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002202 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002203 if (!net_rsp)
2204 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002205 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002206 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002207 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002208 if (atomic_read(&tx_ring->queue_stopped) &&
2209 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2210 /*
2211 * The queue got stopped because the tx_ring was full.
2212 * Wake it up, because it's now at least 25% empty.
2213 */
Ron Mercer1e213302009-03-09 10:59:21 +00002214 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002215 }
2216
2217 return count;
2218}
2219
2220static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2221{
2222 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002223 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002224 struct ql_net_rsp_iocb *net_rsp;
2225 int count = 0;
2226
2227 /* While there are entries in the completion queue. */
2228 while (prod != rx_ring->cnsmr_idx) {
2229
Joe Perchesae9540f72010-02-09 11:49:52 +00002230 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2232 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002233
2234 net_rsp = rx_ring->curr_entry;
2235 rmb();
2236 switch (net_rsp->opcode) {
2237 case OPCODE_IB_MAC_IOCB:
2238 ql_process_mac_rx_intr(qdev, rx_ring,
2239 (struct ib_mac_iocb_rsp *)
2240 net_rsp);
2241 break;
2242
2243 case OPCODE_IB_AE_IOCB:
2244 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2245 net_rsp);
2246 break;
2247 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002248 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250 net_rsp->opcode);
2251 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002252 }
2253 count++;
2254 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002255 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002256 if (count == budget)
2257 break;
2258 }
2259 ql_update_buffer_queues(qdev, rx_ring);
2260 ql_write_cq_idx(rx_ring);
2261 return count;
2262}
2263
2264static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2265{
2266 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002268 struct rx_ring *trx_ring;
2269 int i, work_done = 0;
2270 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002271
Joe Perchesae9540f72010-02-09 11:49:52 +00002272 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002274
Ron Mercer39aa8162009-08-27 11:02:11 +00002275 /* Service the TX rings first. They start
2276 * right after the RSS rings. */
2277 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278 trx_ring = &qdev->rx_ring[i];
2279 /* If this TX completion ring belongs to this vector and
2280 * it's not empty then service it.
2281 */
2282 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002285 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286 "%s: Servicing TX completion ring %d.\n",
2287 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002288 ql_clean_outbound_rx_ring(trx_ring);
2289 }
2290 }
2291
2292 /*
2293 * Now service the RSS ring if it's active.
2294 */
2295 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002297 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298 "%s: Servicing RX completion ring %d.\n",
2299 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002300 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2301 }
2302
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002303 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002304 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002305 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306 }
2307 return work_done;
2308}
2309
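/* NAPI contract note: returning work_done < budget tells the core this
 * ring is drained, so the poller calls napi_complete() and re-arms the
 * completion interrupt above. Returning the full budget keeps the ring
 * in polled mode with the interrupt left disabled.
 */
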
Jiri Pirko18c49b92011-07-21 03:24:11 +00002310static void qlge_vlan_mode(struct net_device *ndev, u32 features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002311{
2312 struct ql_adapter *qdev = netdev_priv(ndev);
2313
Jiri Pirko18c49b92011-07-21 03:24:11 +00002314 if (features & NETIF_F_HW_VLAN_RX) {
2315 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00002316 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002317 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002318 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002319 } else {
Jiri Pirko18c49b92011-07-21 03:24:11 +00002320 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00002321 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002322 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323 }
2324}
2325
Jiri Pirko18c49b92011-07-21 03:24:11 +00002326static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2327{
2328 /*
2329 * Since there is no support for separate rx/tx vlan accel
 2330 * enable/disable, make sure the tx flag is always in the same state as rx.
2331 */
2332 if (features & NETIF_F_HW_VLAN_RX)
2333 features |= NETIF_F_HW_VLAN_TX;
2334 else
2335 features &= ~NETIF_F_HW_VLAN_TX;
2336
2337 return features;
2338}
2339
2340static int qlge_set_features(struct net_device *ndev, u32 features)
2341{
2342 u32 changed = ndev->features ^ features;
2343
2344 if (changed & NETIF_F_HW_VLAN_RX)
2345 qlge_vlan_mode(ndev, features);
2346
2347 return 0;
2348}
2349
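/* For context, a minimal sketch (not the driver's actual table, which
 * is defined elsewhere in this file) of how the two hooks above are
 * wired up on kernels of this vintage, where feature masks are u32:
 *
 *	static const struct net_device_ops qlge_netdev_ops = {
 *		...
 *		.ndo_fix_features	= qlge_fix_features,
 *		.ndo_set_features	= qlge_set_features,
 *		.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
 *		.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
 *	};
 */
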
2350static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2351{
2352 u32 enable_bit = MAC_ADDR_E;
2353
2354 if (ql_set_mac_addr_reg
2355 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2356 netif_err(qdev, ifup, qdev->ndev,
2357 "Failed to init vlan address.\n");
2358 }
2359}
2360
Ron Mercer01e6b952009-10-30 12:13:34 +00002361static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002362{
2363 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002364 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002365
Ron Mercercc288f52009-02-23 10:42:14 +00002366 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367 if (status)
2368 return;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002369
2370 __qlge_vlan_rx_add_vid(qdev, vid);
2371 set_bit(vid, qdev->active_vlans);
2372
Ron Mercercc288f52009-02-23 10:42:14 +00002373 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002374}
2375
Jiri Pirko18c49b92011-07-21 03:24:11 +00002376static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002377{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002378 u32 enable_bit = 0;
2379
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002380 if (ql_set_mac_addr_reg
2381 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002382 netif_err(qdev, ifup, qdev->ndev,
2383 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384 }
Jiri Pirko18c49b92011-07-21 03:24:11 +00002385}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002386
Jiri Pirko18c49b92011-07-21 03:24:11 +00002387static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2388{
2389 struct ql_adapter *qdev = netdev_priv(ndev);
2390 int status;
2391
2392 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2393 if (status)
2394 return;
2395
2396 __qlge_vlan_rx_kill_vid(qdev, vid);
2397 clear_bit(vid, qdev->active_vlans);
2398
2399 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002400}
2401
Ron Mercerc1b60092010-10-27 04:58:12 +00002402static void qlge_restore_vlan(struct ql_adapter *qdev)
2403{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002404 int status;
2405 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002406
Jiri Pirko18c49b92011-07-21 03:24:11 +00002407 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2408 if (status)
2409 return;
2410
2411 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2412 __qlge_vlan_rx_add_vid(qdev, vid);
2413
2414 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002415}
2416
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002417/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2418static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2419{
2420 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002421 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002422 return IRQ_HANDLED;
2423}
2424
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002425/* This handles a fatal error, MPI activity, and the default
2426 * rx_ring in an MSI-X multiple vector environment.
2427 * In MSI/Legacy environment it also process the rest of
2428 * the rx_rings.
2429 */
2430static irqreturn_t qlge_isr(int irq, void *dev_id)
2431{
2432 struct rx_ring *rx_ring = dev_id;
2433 struct ql_adapter *qdev = rx_ring->qdev;
2434 struct intr_context *intr_context = &qdev->intr_context[0];
2435 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002436 int work_done = 0;
2437
Ron Mercerbb0d2152008-10-20 10:30:26 -07002438 spin_lock(&qdev->hw_lock);
2439 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002440 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2441 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002442 spin_unlock(&qdev->hw_lock);
2443 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002444 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002445 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002446
Ron Mercerbb0d2152008-10-20 10:30:26 -07002447 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002448
2449 /*
2450 * Check for fatal error.
2451 */
2452 if (var & STS_FE) {
2453 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002454 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002455 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002456 netdev_err(qdev->ndev, "Resetting chip. "
2457 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002458 return IRQ_HANDLED;
2459 }
2460
2461 /*
2462 * Check MPI processor activity.
2463 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002464 if ((var & STS_PI) &&
2465 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002466 /*
2467 * We've got an async event or mailbox completion.
2468 * Handle it and clear the source of the interrupt.
2469 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002470 netif_err(qdev, intr, qdev->ndev,
2471 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002472 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002473 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2474 queue_delayed_work_on(smp_processor_id(),
2475 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002476 work_done++;
2477 }
2478
2479 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002480 * Get the bit-mask that shows the active queues for this
2481 * pass. Compare it to the queues that this irq services
2482 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002483 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002484 var = ql_read32(qdev, ISR1);
2485 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002486 netif_info(qdev, intr, qdev->ndev,
2487 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002488 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002489 napi_schedule(&rx_ring->napi);
2490 work_done++;
2491 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002492 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002493 return work_done ? IRQ_HANDLED : IRQ_NONE;
2494}
2495
2496static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2497{
2498
2499 if (skb_is_gso(skb)) {
2500 int err;
2501 if (skb_header_cloned(skb)) {
2502 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2503 if (err)
2504 return err;
2505 }
2506
2507 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2508 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2509 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2510 mac_iocb_ptr->total_hdrs_len =
2511 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2512 mac_iocb_ptr->net_trans_offset =
2513 cpu_to_le16(skb_network_offset(skb) |
2514 skb_transport_offset(skb)
2515 << OB_MAC_TRANSPORT_HDR_SHIFT);
2516 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2517 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2518 if (likely(skb->protocol == htons(ETH_P_IP))) {
2519 struct iphdr *iph = ip_hdr(skb);
2520 iph->check = 0;
2521 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2522 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2523 iph->daddr, 0,
2524 IPPROTO_TCP,
2525 0);
2526 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2527 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2528 tcp_hdr(skb)->check =
2529 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2530 &ipv6_hdr(skb)->daddr,
2531 0, IPPROTO_TCP, 0);
2532 }
2533 return 1;
2534 }
2535 return 0;
2536}
2537
2538static void ql_hw_csum_setup(struct sk_buff *skb,
2539 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2540{
2541 int len;
2542 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002543 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002544 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2545 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2546 mac_iocb_ptr->net_trans_offset =
2547 cpu_to_le16(skb_network_offset(skb) |
2548 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2549
2550 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2551 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2552 if (likely(iph->protocol == IPPROTO_TCP)) {
2553 check = &(tcp_hdr(skb)->check);
2554 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2555 mac_iocb_ptr->total_hdrs_len =
2556 cpu_to_le16(skb_transport_offset(skb) +
2557 (tcp_hdr(skb)->doff << 2));
2558 } else {
2559 check = &(udp_hdr(skb)->check);
2560 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2561 mac_iocb_ptr->total_hdrs_len =
2562 cpu_to_le16(skb_transport_offset(skb) +
2563 sizeof(struct udphdr));
2564 }
2565 *check = ~csum_tcpudp_magic(iph->saddr,
2566 iph->daddr, len, iph->protocol, 0);
2567}
2568
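/* Illustration of the seeding done above (hypothetical helper, not part
 * of the driver): for hardware checksum offload the transport checksum
 * field is primed with the inverted pseudo-header sum and the NIC folds
 * in the payload bytes. For the TCP case this amounts to:
 */
static inline void ql_seed_tcp_pseudo_csum_sketch(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int len = ntohs(iph->tot_len) - (iph->ihl << 2);

	/* Same expression ql_hw_csum_setup() stores through *check. */
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 len, IPPROTO_TCP, 0);
}
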
Stephen Hemminger613573252009-08-31 19:50:58 +00002569static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002570{
2571 struct tx_ring_desc *tx_ring_desc;
2572 struct ob_mac_iocb_req *mac_iocb_ptr;
2573 struct ql_adapter *qdev = netdev_priv(ndev);
2574 int tso;
2575 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002576 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002577
2578 tx_ring = &qdev->tx_ring[tx_ring_idx];
2579
Ron Mercer74c50b42009-03-09 10:59:27 +00002580 if (skb_padto(skb, ETH_ZLEN))
2581 return NETDEV_TX_OK;
2582
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002583 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002584 netif_info(qdev, tx_queued, qdev->ndev,
2585 "%s: shutting down tx queue %d du to lack of resources.\n",
2586 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002587 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002588 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002589 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002590 return NETDEV_TX_BUSY;
2591 }
2592 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2593 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002594 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002595
2596 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2597 mac_iocb_ptr->tid = tx_ring_desc->index;
2598 /* We use the upper 32-bits to store the tx queue for this IO.
2599 * When we get the completion we can use it to establish the context.
2600 */
2601 mac_iocb_ptr->txq_idx = tx_ring_idx;
2602 tx_ring_desc->skb = skb;
2603
2604 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2605
Jesse Grosseab6d182010-10-20 13:56:03 +00002606 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002607 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2608 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002609 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2610 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2611 }
2612 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2613 if (tso < 0) {
2614 dev_kfree_skb_any(skb);
2615 return NETDEV_TX_OK;
2616 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2617 ql_hw_csum_setup(skb,
2618 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2619 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002620 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2621 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002622 netif_err(qdev, tx_queued, qdev->ndev,
2623 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002624 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002625 return NETDEV_TX_BUSY;
2626 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002627 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2628 tx_ring->prod_idx++;
2629 if (tx_ring->prod_idx == tx_ring->wq_len)
2630 tx_ring->prod_idx = 0;
2631 wmb();
2632
2633 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002634 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2635 "tx queued, slot %d, len %d\n",
2636 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002637
2638 atomic_dec(&tx_ring->tx_count);
2639 return NETDEV_TX_OK;
2640}
2641
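/* TX flow-control handshake, restating qlge_send() above and
 * ql_clean_outbound_rx_ring() earlier in this file: the send path stops
 * the subqueue when fewer than two descriptors remain, and the
 * completion path wakes it only once at least a quarter of the ring has
 * drained:
 *
 *	if (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))
 *		netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
 *
 * The 25% hysteresis avoids a stop/wake ping-pong while the ring hovers
 * near full.
 */
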
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002642
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002643static void ql_free_shadow_space(struct ql_adapter *qdev)
2644{
2645 if (qdev->rx_ring_shadow_reg_area) {
2646 pci_free_consistent(qdev->pdev,
2647 PAGE_SIZE,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
2650 qdev->rx_ring_shadow_reg_area = NULL;
2651 }
2652 if (qdev->tx_ring_shadow_reg_area) {
2653 pci_free_consistent(qdev->pdev,
2654 PAGE_SIZE,
2655 qdev->tx_ring_shadow_reg_area,
2656 qdev->tx_ring_shadow_reg_dma);
2657 qdev->tx_ring_shadow_reg_area = NULL;
2658 }
2659}
2660
2661static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2662{
2663 qdev->rx_ring_shadow_reg_area =
2664 pci_alloc_consistent(qdev->pdev,
2665 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2666 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002667 netif_err(qdev, ifup, qdev->ndev,
2668 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002669 return -ENOMEM;
2670 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002671 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002672 qdev->tx_ring_shadow_reg_area =
2673 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2674 &qdev->tx_ring_shadow_reg_dma);
2675 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002676 netif_err(qdev, ifup, qdev->ndev,
2677 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002678 goto err_wqp_sh_area;
2679 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002680 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002681 return 0;
2682
2683err_wqp_sh_area:
2684 pci_free_consistent(qdev->pdev,
2685 PAGE_SIZE,
2686 qdev->rx_ring_shadow_reg_area,
2687 qdev->rx_ring_shadow_reg_dma);
2688 return -ENOMEM;
2689}
2690
2691static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2692{
2693 struct tx_ring_desc *tx_ring_desc;
2694 int i;
2695 struct ob_mac_iocb_req *mac_iocb_ptr;
2696
2697 mac_iocb_ptr = tx_ring->wq_base;
2698 tx_ring_desc = tx_ring->q;
2699 for (i = 0; i < tx_ring->wq_len; i++) {
2700 tx_ring_desc->index = i;
2701 tx_ring_desc->skb = NULL;
2702 tx_ring_desc->queue_entry = mac_iocb_ptr;
2703 mac_iocb_ptr++;
2704 tx_ring_desc++;
2705 }
2706 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2707 atomic_set(&tx_ring->queue_stopped, 0);
2708}
2709
2710static void ql_free_tx_resources(struct ql_adapter *qdev,
2711 struct tx_ring *tx_ring)
2712{
2713 if (tx_ring->wq_base) {
2714 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2715 tx_ring->wq_base, tx_ring->wq_base_dma);
2716 tx_ring->wq_base = NULL;
2717 }
2718 kfree(tx_ring->q);
2719 tx_ring->q = NULL;
2720}
2721
2722static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2723 struct tx_ring *tx_ring)
2724{
2725 tx_ring->wq_base =
2726 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2727 &tx_ring->wq_base_dma);
2728
Joe Perches8e95a202009-12-03 07:58:21 +00002729 if ((tx_ring->wq_base == NULL) ||
2730 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002731 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002732 return -ENOMEM;
2733 }
2734 tx_ring->q =
2735 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2736 if (tx_ring->q == NULL)
2737 goto err;
2738
2739 return 0;
2740err:
2741 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2742 tx_ring->wq_base, tx_ring->wq_base_dma);
2743 return -ENOMEM;
2744}
2745
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002746static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002747{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002748 struct bq_desc *lbq_desc;
2749
Ron Mercer7c734352009-10-19 03:32:19 +00002750 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002751
Ron Mercer7c734352009-10-19 03:32:19 +00002752 curr_idx = rx_ring->lbq_curr_idx;
2753 clean_idx = rx_ring->lbq_clean_idx;
2754 while (curr_idx != clean_idx) {
2755 lbq_desc = &rx_ring->lbq[curr_idx];
2756
2757 if (lbq_desc->p.pg_chunk.last_flag) {
2758 pci_unmap_page(qdev->pdev,
2759 lbq_desc->p.pg_chunk.map,
2760 ql_lbq_block_size(qdev),
2761 PCI_DMA_FROMDEVICE);
2762 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002763 }
Ron Mercer7c734352009-10-19 03:32:19 +00002764
2765 put_page(lbq_desc->p.pg_chunk.page);
2766 lbq_desc->p.pg_chunk.page = NULL;
2767
2768 if (++curr_idx == rx_ring->lbq_len)
2769 curr_idx = 0;
2770
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002771 }
2772}
2773
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002774static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002775{
2776 int i;
2777 struct bq_desc *sbq_desc;
2778
2779 for (i = 0; i < rx_ring->sbq_len; i++) {
2780 sbq_desc = &rx_ring->sbq[i];
2781 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002782 netif_err(qdev, ifup, qdev->ndev,
2783 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002784 return;
2785 }
2786 if (sbq_desc->p.skb) {
2787 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002788 dma_unmap_addr(sbq_desc, mapaddr),
2789 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002790 PCI_DMA_FROMDEVICE);
2791 dev_kfree_skb(sbq_desc->p.skb);
2792 sbq_desc->p.skb = NULL;
2793 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002794 }
2795}
2796
Ron Mercer4545a3f2009-02-23 10:42:17 +00002797/* Free all large and small rx buffers associated
2798 * with the completion queues for this device.
2799 */
2800static void ql_free_rx_buffers(struct ql_adapter *qdev)
2801{
2802 int i;
2803 struct rx_ring *rx_ring;
2804
2805 for (i = 0; i < qdev->rx_ring_count; i++) {
2806 rx_ring = &qdev->rx_ring[i];
2807 if (rx_ring->lbq)
2808 ql_free_lbq_buffers(qdev, rx_ring);
2809 if (rx_ring->sbq)
2810 ql_free_sbq_buffers(qdev, rx_ring);
2811 }
2812}
2813
2814static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2815{
2816 struct rx_ring *rx_ring;
2817 int i;
2818
2819 for (i = 0; i < qdev->rx_ring_count; i++) {
2820 rx_ring = &qdev->rx_ring[i];
2821 if (rx_ring->type != TX_Q)
2822 ql_update_buffer_queues(qdev, rx_ring);
2823 }
2824}
2825
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

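/* Note: the cq_size, sbq_size and lbq_size values consumed below are
 * computed per ring in ql_configure_rings() later in this file.
 */
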
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

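	/* The slice of the shadow area carved up above is laid out as
	 * follows (a sketch, assuming MAX_DB_PAGES_PER_BQ() yields the
	 * number of DB_PAGE_SIZE pages needed for a queue's __le64 entries):
	 *   [0]                producer index shadow (u64)
	 *   [8..]              lbq indirection pointers, one per DB page
	 *   [8 + 8 * N_lbq..]  sbq indirection pointers
	 * The chip follows these per-page pointers to find the queues.
	 */
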
	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

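/* Set up the tx ring (work queue) control block and pass it to the
 * chip, mirroring ql_start_rx_ring() above.  The wqicb name presumably
 * stands for "Work Queue Initialization Control Block", by analogy
 * with the cqicb.
 */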
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded WQICB.\n");
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

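/* Note on the retry loop below: the legacy pci_enable_msix() returns 0
 * on success, a negative errno on failure, and a positive count when
 * fewer vectors are available than requested; in that last case we
 * retry with the count the hardware can actually provide.
 */
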
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			(1 << qdev->rx_ring[qdev->rss_ring_count +
			(vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

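/* Worked example for the two functions above: with 2 MSI-X vectors
 * (2 RSS rings) and 8 TX completion rings, tx_rings_per_vector is 4,
 * the RSS rings own cq_ids 0-1 and the TX completion rings own 2-9.
 * Vector 0 then gets irq_mask bits 0, 2, 3, 4 and 5, while vector 1
 * gets bits 1, 6, 7, 8 and 9.
 */
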
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
					     "freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}

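/* Note: the dev_id cookie passed to request_irq()/free_irq() is the
 * rx_ring the vector services (&qdev->rx_ring[i], or ring 0 in
 * MSI/legacy mode), so the handler can recover its ring directly.
 */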
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			} else {
				netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
					     "Hooked intr %d, queue type %s, with name %s.\n",
					     i,
					     qdev->rx_ring[i].type == DEFAULT_Q ?
					     "DEFAULT_Q" :
					     qdev->rx_ring[i].type == TX_Q ?
					     "TX_Q" :
					     qdev->rx_ring[i].type == RX_Q ?
					     "RX_Q" : "",
					     intr_context->name);
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

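/* Note on the indirection table built below: hash_id[i] is computed as
 * i & (rss_ring_count - 1), which equals i % rss_ring_count only when
 * the RSS ring count is a power of two; e.g. with 4 RSS rings the
 * 1024-entry table repeats the pattern 0, 1, 2, 3.
 */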
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Successfully loaded RICB.\n");
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which the
	 * packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Enabling NAPI for rx_ring[%d].\n", i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

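	/* usecs_to_jiffies() rounds up, so this is normally a single jiffy;
	 * the max() just guards against a zero poll window below.
	 */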
	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the QL_ASIC_RECOVERY bit is set, skip the mailbox commands
	 * and just clear the bit; otherwise we are in the normal reset
	 * process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit a pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

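/* Ring topology: the first rss_ring_count rx_rings configured below are
 * inbound (RSS) queues with cq_ids 0..rss_ring_count-1; the remaining
 * tx_ring_count rx_rings are outbound completion queues whose cq_ids
 * continue from there, matching the tx_ring->cq_id assignment.
 */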
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "lbq_buf_size %d, order = %d\n",
				     rx_ring->lbq_buf_size,
				     qdev->lbq_buf_order);
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

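/* Called on MTU change: waits out any in-flight reset, brings the
 * adapter down, recomputes the large buffer size/order for the new
 * MTU, and brings the adapter back up.
 */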
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");

	return status;
}

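/* Fold the per-ring counters into the single net_device stats block:
 * rx counters are summed across the RSS rings, tx counters across
 * the tx rings.
 */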
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

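/* Push the stack's rx filter state down to the hardware.  The
 * promiscuous and all-multi routing slots are only touched on a
 * transition (tracked via the QL_PROMISCUOUS/QL_ALLMULTI flag bits),
 * and the multicast list is loaded into the MAC address registers
 * under the MAC address semaphore.
 */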
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

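/* Program a new station address into the CAM under the MAC address
 * semaphore.  A local copy is kept in qdev->current_mac_addr,
 * presumably so the address can be restored after an adapter reset.
 */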
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

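/* Recover from an ASIC error by cycling the adapter down and back up
 * under the rtnl lock.  The QL_ALLMULTI/QL_PROMISCUOUS flags are
 * cleared and the multicast list re-applied so the restored hardware
 * matches the stack's current rx mode.
 */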
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

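/* Derive the per-function constants from the STS register: which of
 * the two ports this function drives, plus the matching semaphore
 * mask, link/init status bits, mailbox addresses, and the nic_ops
 * vector for the 8012 or 8000 device.
 */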
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

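/* Probe-time setup for one PCI function: enable the device, map the
 * control-register and doorbell BARs, pick up the MAC address from
 * flash, and initialize the default ring sizes, coalescing
 * parameters, and worker threads.
 */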
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		/* The device is already enabled; undo that on failure. */
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

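/* Periodic timer whose only job is to touch a device register so a
 * dead PCI bus is noticed and EEH recovery can kick in.  It re-arms
 * itself every 5 seconds for as long as the bus stays reachable.
 */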
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

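/* PCI probe: allocate a multiqueue net_device sized to the online
 * CPUs, run the one-time device init, advertise the offload feature
 * set, register with the stack, and start the EEH watchdog timer.
 */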
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the () routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

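/* Power management: detach and quiesce the device, arm wake-on-LAN
 * via ql_wol(), save the PCI state, and hand the device to the PCI
 * core in the requested sleep state.  The resume path reverses this
 * and restarts the EEH timer.
 */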
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		/* Bail out only on failure; on success fall through
		 * to arm WoL and save the PCI state.
		 */
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);