/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

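	/* Request the semaphore by writing the SEM_SET bits for this
	 * resource, then read back: if the bits stuck, this function now
	 * owns the semaphore and we return 0; otherwise another function
	 * holds it and we return nonzero.
	 */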
	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* still need to unmap the DMA area */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

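/* Global interrupt enable/disable.  Writes to INTR_EN pair the enable
 * bit with a copy shifted into the upper 16 bits (the hardware's write
 * mask), so the enable path writes both halves while the disable path
 * writes only the mask half.
 */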
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

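/* Verify the flash image just read into qdev->flash: the first four
 * bytes must match the expected signature string and the 16-bit words
 * must sum to zero.  Returns 0 only when both checks pass.
 */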
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return	status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

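/* Size of the "master" page from which large-buffer chunks are carved;
 * lbq_buf_order is the page allocation order used for that page.
 */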
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

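/* Get the next large buffer descriptor and sync its page chunk for CPU
 * access.  The underlying master page is only unmapped once its last
 * chunk has been consumed.
 */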
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

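/* Hand the next chunk of the current master page to a large-buffer
 * descriptor, allocating and DMA-mapping a fresh page when the previous
 * one has been used up.  get_page() is taken for every chunk except the
 * last so the page survives until all of its chunks are consumed.
 */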
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
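/* Buffers are replenished sixteen at a time, and only while more than
 * 32 descriptors are free; the producer index doorbell is rung once at
 * the end rather than once per buffer.
 */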
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring. */
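/* This variant hands the receive page chunk straight to GRO: the chunk
 * is attached as a fragment of the skb obtained from napi_get_frags()
 * and then passed up via napi_gro_frags(), so no data is copied.
 */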
Ron Mercer63526712010-01-02 10:37:44 +00001470static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 u32 length,
1474 u16 vlan_id)
1475{
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001478 struct napi_struct *napi = &rx_ring->napi;
1479
1480 napi->dev = qdev->ndev;
1481
1482 skb = napi_get_frags(napi);
1483 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001484 netif_err(qdev, drv, qdev->ndev,
1485 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001486 rx_ring->rx_dropped++;
1487 put_page(lbq_desc->p.pg_chunk.page);
1488 return;
1489 }
1490 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001491 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 lbq_desc->p.pg_chunk.page,
1493 lbq_desc->p.pg_chunk.offset,
1494 length);
Ron Mercer63526712010-01-02 10:37:44 +00001495
1496 skb->len += length;
1497 skb->data_len += length;
1498 skb->truesize += length;
1499 skb_shinfo(skb)->nr_frags++;
1500
1501 rx_ring->rx_packets++;
1502 rx_ring->rx_bytes += length;
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001505 if (vlan_id != 0xffff)
1506 __vlan_hwaccel_put_tag(skb, vlan_id);
1507 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001508}
1509
1510/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001511static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1514 u32 length,
1515 u16 vlan_id)
1516{
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1519 void *addr;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1522
1523 skb = netdev_alloc_skb(ndev, length);
1524 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001525 netif_err(qdev, drv, qdev->ndev,
1526			   "Couldn't get an skb, need to unwind.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1529 return;
1530 }
1531
1532 addr = lbq_desc->p.pg_chunk.va;
1533 prefetch(addr);
1534
1535
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001538 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001540 rx_ring->rx_errors++;
1541 goto err_out;
1542 }
1543
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1546 */
1547 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001548 netif_err(qdev, drv, qdev->ndev,
1549			  "Frame too long, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001550 rx_ring->rx_dropped++;
1551 goto err_out;
1552 }
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559 length-ETH_HLEN);
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1563
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001567 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001568
Michał Mirosław88230fd2011-04-18 13:31:21 +00001569 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571 /* TCP frame. */
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001579 struct iphdr *iph =
1580 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
Ron Mercer4f848c02010-01-02 10:37:43 +00001581 if (!(iph->frag_off &
1582 cpu_to_be16(IP_MF|IP_OFFSET))) {
1583 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001584 netif_printk(qdev, rx_status, KERN_DEBUG,
1585 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001586 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001587 }
1588 }
1589 }
1590
1591 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001592 if (vlan_id != 0xffff)
1593 __vlan_hwaccel_put_tag(skb, vlan_id);
1594 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1595 napi_gro_receive(napi, skb);
1596 else
1597 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001598 return;
1599err_out:
1600 dev_kfree_skb_any(skb);
1601 put_page(lbq_desc->p.pg_chunk.page);
1602}
1603
1604/* Process an inbound completion from an rx ring. */
1605static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1606 struct rx_ring *rx_ring,
1607 struct ib_mac_iocb_rsp *ib_mac_rsp,
1608 u32 length,
1609 u16 vlan_id)
1610{
1611 struct net_device *ndev = qdev->ndev;
1612 struct sk_buff *skb = NULL;
1613 struct sk_buff *new_skb = NULL;
1614 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1615
1616 skb = sbq_desc->p.skb;
1617 /* Allocate new_skb and copy */
1618 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1619 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001620 netif_err(qdev, probe, qdev->ndev,
1621 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001622 rx_ring->rx_dropped++;
1623 return;
1624 }
1625 skb_reserve(new_skb, NET_IP_ALIGN);
1626 memcpy(skb_put(new_skb, length), skb->data, length);
1627 skb = new_skb;
1628
1629 /* Frame error, so drop the packet. */
1630 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001631 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001632 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001633 dev_kfree_skb_any(skb);
1634 rx_ring->rx_errors++;
1635 return;
1636 }
1637
1638 /* loopback self test for ethtool */
1639 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1640 ql_check_lb_frame(qdev, skb);
1641 dev_kfree_skb_any(skb);
1642 return;
1643 }
1644
1645 /* The max framesize filter on this chip is set higher than
1646 * MTU since FCoE uses 2k frames.
1647 */
1648 if (skb->len > ndev->mtu + ETH_HLEN) {
1649 dev_kfree_skb_any(skb);
1650 rx_ring->rx_dropped++;
1651 return;
1652 }
1653
1654 prefetch(skb->data);
1655 skb->dev = ndev;
1656 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001657 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 "%s Multicast.\n",
1659 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1660 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1661 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1662 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1663 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1664 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001665 }
1666 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001667 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1668 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001669
1670 rx_ring->rx_packets++;
1671 rx_ring->rx_bytes += skb->len;
1672 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001673 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001674
1675 /* If rx checksum is on, and there are no
1676 * csum or frame errors.
1677 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001678 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001679 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1680 /* TCP frame. */
1681 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001682 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1683 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001684 skb->ip_summed = CHECKSUM_UNNECESSARY;
1685 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1686 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1687 /* Unfragmented ipv4 UDP frame. */
1688 struct iphdr *iph = (struct iphdr *) skb->data;
1689 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001690 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001691 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001692 netif_printk(qdev, rx_status, KERN_DEBUG,
1693 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001694 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001695 }
1696 }
1697 }
1698
1699 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001700 if (vlan_id != 0xffff)
1701 __vlan_hwaccel_put_tag(skb, vlan_id);
1702 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1703 napi_gro_receive(&rx_ring->napi, skb);
1704 else
1705 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001706}
1707
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001708static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001709{
1710 void *temp_addr = skb->data;
1711
1712 /* Undo the skb_reserve(skb,32) we did before
1713 * giving to hardware, and realign data on
1714 * a 2-byte boundary.
1715 */
1716 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1717 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1718 skb_copy_to_linear_data(skb, temp_addr,
1719 (unsigned int)len);
1720}
1721
1722/*
1723 * This function builds an skb for the given inbound
1724 * completion. It will be rewritten for readability in the near
1725 * future, but for now it works well.
1726 */
1727static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1728 struct rx_ring *rx_ring,
1729 struct ib_mac_iocb_rsp *ib_mac_rsp)
1730{
1731 struct bq_desc *lbq_desc;
1732 struct bq_desc *sbq_desc;
1733 struct sk_buff *skb = NULL;
1734 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1735 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1736
1737 /*
1738 * Handle the header buffer if present.
1739 */
1740 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1741 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001742 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1743 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001744 /*
1745 * Headers fit nicely into a small buffer.
1746 */
1747 sbq_desc = ql_get_curr_sbuf(rx_ring);
1748 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001749 dma_unmap_addr(sbq_desc, mapaddr),
1750 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001751 PCI_DMA_FROMDEVICE);
1752 skb = sbq_desc->p.skb;
1753 ql_realign_skb(skb, hdr_len);
1754 skb_put(skb, hdr_len);
1755 sbq_desc->p.skb = NULL;
1756 }
1757
1758 /*
1759 * Handle the data buffer(s).
1760 */
1761 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001762 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1763 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001764 return skb;
1765 }
1766
1767 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1768 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001769 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1770 "Headers in small, data of %d bytes in small, combine them.\n",
1771 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001772 /*
1773 * Data is less than small buffer size so it's
1774 * stuffed in a small buffer.
1775 * For this case we append the data
1776 * from the "data" small buffer to the "header" small
1777 * buffer.
1778 */
1779 sbq_desc = ql_get_curr_sbuf(rx_ring);
1780 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001781 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001782 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001783 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001784 (sbq_desc, maplen),
1785 PCI_DMA_FROMDEVICE);
1786 memcpy(skb_put(skb, length),
1787 sbq_desc->p.skb->data, length);
1788 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001789 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001790 (sbq_desc,
1791 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001792 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001793 (sbq_desc,
1794 maplen),
1795 PCI_DMA_FROMDEVICE);
1796 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001797 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1798 "%d bytes in a single small buffer.\n",
1799 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001800 sbq_desc = ql_get_curr_sbuf(rx_ring);
1801 skb = sbq_desc->p.skb;
1802 ql_realign_skb(skb, length);
1803 skb_put(skb, length);
1804 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001805 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001806 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001807 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001808 maplen),
1809 PCI_DMA_FROMDEVICE);
1810 sbq_desc->p.skb = NULL;
1811 }
1812 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1813 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001814 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1815 "Header in small, %d bytes in large. Chain large to small!\n",
1816 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001817 /*
1818 * The data is in a single large buffer. We
1819 * chain it to the header buffer's skb and let
1820 * it rip.
1821 */
Ron Mercer7c734352009-10-19 03:32:19 +00001822 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001823 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1824 "Chaining page at offset = %d, for %d bytes to skb.\n",
1825 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001826 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1827 lbq_desc->p.pg_chunk.offset,
1828 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001829 skb->len += length;
1830 skb->data_len += length;
1831 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001832 } else {
1833 /*
1834 * The headers and data are in a single large buffer. We
1835 * copy it to a new skb and let it go. This can happen with
1836 * jumbo mtu on a non-TCP/UDP frame.
1837 */
Ron Mercer7c734352009-10-19 03:32:19 +00001838 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001839 skb = netdev_alloc_skb(qdev->ndev, length);
1840 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001841 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1842 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001843 return NULL;
1844 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001845 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001846 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001847 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001848 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001849 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001850 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001851 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1852 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1853 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001854 skb_fill_page_desc(skb, 0,
1855 lbq_desc->p.pg_chunk.page,
1856 lbq_desc->p.pg_chunk.offset,
1857 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001858 skb->len += length;
1859 skb->data_len += length;
1860 skb->truesize += length;
1861 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001862 __pskb_pull_tail(skb,
1863 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1864 VLAN_ETH_HLEN : ETH_HLEN);
1865 }
1866 } else {
1867 /*
1868 * The data is in a chain of large buffers
1869 * pointed to by a small buffer. We loop
1870		 * through and chain them to our small header
1871 * buffer's skb.
1872 * frags: There are 18 max frags and our small
1873 * buffer will hold 32 of them. The thing is,
1874 * we'll use 3 max for our 9000 byte jumbo
1875 * frames. If the MTU goes up we could
1876 * eventually be in trouble.
1877 */
Ron Mercer7c734352009-10-19 03:32:19 +00001878 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001879 sbq_desc = ql_get_curr_sbuf(rx_ring);
1880 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001881 dma_unmap_addr(sbq_desc, mapaddr),
1882 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001883 PCI_DMA_FROMDEVICE);
1884 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1885 /*
1886			 * This is a non-TCP/UDP IP frame, so
1887 * the headers aren't split into a small
1888 * buffer. We have to use the small buffer
1889 * that contains our sg list as our skb to
1890 * send upstairs. Copy the sg list here to
1891 * a local buffer and use it to find the
1892 * pages to chain.
1893 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001894 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1895 "%d bytes of headers & data in chain of large.\n",
1896 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001897 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001898 sbq_desc->p.skb = NULL;
1899 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001900 }
1901 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001902 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1903 size = (length < rx_ring->lbq_buf_size) ? length :
1904 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001905
Joe Perchesae9540f72010-02-09 11:49:52 +00001906 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1907 "Adding page %d to skb for %d bytes.\n",
1908 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001909 skb_fill_page_desc(skb, i,
1910 lbq_desc->p.pg_chunk.page,
1911 lbq_desc->p.pg_chunk.offset,
1912 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001913 skb->len += size;
1914 skb->data_len += size;
1915 skb->truesize += size;
1916 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001917 i++;
1918 }
1919 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1920 VLAN_ETH_HLEN : ETH_HLEN);
1921 }
1922 return skb;
1923}
1924
1925/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001926static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001927 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001928 struct ib_mac_iocb_rsp *ib_mac_rsp,
1929 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001930{
1931 struct net_device *ndev = qdev->ndev;
1932 struct sk_buff *skb = NULL;
1933
1934 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1935
1936 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1937 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001938 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1939 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001940 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001941 return;
1942 }
1943
Ron Mercera32959c2009-06-09 05:39:27 +00001944 /* Frame error, so drop the packet. */
1945 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001946 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001947 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001948 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001949 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001950 return;
1951 }
Ron Mercerec33a492009-06-09 05:39:28 +00001952
1953 /* The max framesize filter on this chip is set higher than
1954 * MTU since FCoE uses 2k frames.
1955 */
1956 if (skb->len > ndev->mtu + ETH_HLEN) {
1957 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001958 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001959 return;
1960 }
1961
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001962 /* loopback self test for ethtool */
1963 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1964 ql_check_lb_frame(qdev, skb);
1965 dev_kfree_skb_any(skb);
1966 return;
1967 }
1968
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001969 prefetch(skb->data);
1970 skb->dev = ndev;
1971 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001972 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1973 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1975 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1976 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1977 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1978 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001979 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001980 }
1981 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001982 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1983 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001984 }
Ron Mercerd555f592009-03-09 10:59:19 +00001985
Ron Mercerd555f592009-03-09 10:59:19 +00001986 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001987 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001988
1989 /* If rx checksum is on, and there are no
1990 * csum or frame errors.
1991 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001992 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001993 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1994 /* TCP frame. */
1995 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001996 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1997 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001998 skb->ip_summed = CHECKSUM_UNNECESSARY;
1999 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2000 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2001 /* Unfragmented ipv4 UDP frame. */
2002 struct iphdr *iph = (struct iphdr *) skb->data;
2003 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002004 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002005 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002006 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2007				"UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002008 }
2009 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002010 }
Ron Mercerd555f592009-03-09 10:59:19 +00002011
Ron Mercer885ee392009-11-03 13:49:31 +00002012 rx_ring->rx_packets++;
2013 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002014 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002015 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2016 __vlan_hwaccel_put_tag(skb, vlan_id);
2017 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2018 napi_gro_receive(&rx_ring->napi, skb);
2019 else
2020 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002021}
2022
Ron Mercer4f848c02010-01-02 10:37:43 +00002023/* Process an inbound completion from an rx ring. */
2024static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2025 struct rx_ring *rx_ring,
2026 struct ib_mac_iocb_rsp *ib_mac_rsp)
2027{
2028 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2029 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2030 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2031 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2032
2033 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2034
2035 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2036 /* The data and headers are split into
2037 * separate buffers.
2038 */
2039 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2040 vlan_id);
2041 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2042 /* The data fit in a single small buffer.
2043 * Allocate a new skb, copy the data and
2044 * return the buffer to the free pool.
2045 */
2046 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2047 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002048 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2049 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2050 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2051 /* TCP packet in a page chunk that's been checksummed.
2052 * Tack it on to our GRO skb and let it go.
2053 */
2054 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2055 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002056 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2057 /* Non-TCP packet in a page chunk. Allocate an
2058 * skb, tack it on frags, and send it up.
2059 */
2060 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2061 length, vlan_id);
2062 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002063 /* Non-TCP/UDP large frames that span multiple buffers
2064		 * can be processed correctly by the split frame logic.
2065 */
2066 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2067 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002068 }
2069
2070 return (unsigned long)length;
2071}
2072
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002073/* Process an outbound completion from an rx ring. */
2074static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2075 struct ob_mac_iocb_rsp *mac_rsp)
2076{
2077 struct tx_ring *tx_ring;
2078 struct tx_ring_desc *tx_ring_desc;
2079
2080 QL_DUMP_OB_MAC_RSP(mac_rsp);
2081 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2082 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2083 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002084 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2085 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002086 dev_kfree_skb(tx_ring_desc->skb);
2087 tx_ring_desc->skb = NULL;
2088
2089 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2090 OB_MAC_IOCB_RSP_S |
2091 OB_MAC_IOCB_RSP_L |
2092 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2093 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002094 netif_warn(qdev, tx_done, qdev->ndev,
2095 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002096 }
2097 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002098 netif_warn(qdev, tx_done, qdev->ndev,
2099 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002100 }
2101 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002102 netif_warn(qdev, tx_done, qdev->ndev,
2103 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002104 }
2105 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002106 netif_warn(qdev, tx_done, qdev->ndev,
2107 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002108 }
2109 }
2110 atomic_inc(&tx_ring->tx_count);
2111}
2112
2113/* Fire up a handler to reset the MPI processor. */
2114void ql_queue_fw_error(struct ql_adapter *qdev)
2115{
Ron Mercer6a473302009-07-02 06:06:12 +00002116 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002117 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2118}
2119
2120void ql_queue_asic_error(struct ql_adapter *qdev)
2121{
Ron Mercer6a473302009-07-02 06:06:12 +00002122 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002124 /* Clear adapter up bit to signal the recovery
2125 * process that it shouldn't kill the reset worker
2126 * thread
2127 */
2128 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002129	/* Set asic recovery bit to indicate to the reset process that we are
2130	 * in fatal error recovery rather than a normal close
2131 */
2132 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002133 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2134}
2135
2136static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2137 struct ib_ae_iocb_rsp *ib_ae_rsp)
2138{
2139 switch (ib_ae_rsp->event) {
2140 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002141 netif_err(qdev, rx_err, qdev->ndev,
2142 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002143 ql_queue_fw_error(qdev);
2144 return;
2145
2146 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002147 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2148 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002149 ql_queue_asic_error(qdev);
2150 return;
2151
2152 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002153 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002154 ql_queue_asic_error(qdev);
2155 break;
2156
2157 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002158 netdev_err(qdev->ndev, "PCI error occurred when reading "
2159 "anonymous buffers from rx_ring %d.\n",
2160 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002161 ql_queue_asic_error(qdev);
2162 break;
2163
2164 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002165 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2166 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002167 ql_queue_asic_error(qdev);
2168 break;
2169 }
2170}
2171
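/* Drain TX completions from an outbound completion (rx) ring: unmap and
 * free the skb for each completed send, then wake the TX subqueue if it
 * was stopped and is now at least 25% empty.
 */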
2172static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2173{
2174 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002175 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002176 struct ob_mac_iocb_rsp *net_rsp = NULL;
2177 int count = 0;
2178
Ron Mercer1e213302009-03-09 10:59:21 +00002179 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002180 /* While there are entries in the completion queue. */
2181 while (prod != rx_ring->cnsmr_idx) {
2182
Joe Perchesae9540f72010-02-09 11:49:52 +00002183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2184			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2185 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002186
2187 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2188 rmb();
2189 switch (net_rsp->opcode) {
2190
2191 case OPCODE_OB_MAC_TSO_IOCB:
2192 case OPCODE_OB_MAC_IOCB:
2193 ql_process_mac_tx_intr(qdev, net_rsp);
2194 break;
2195 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002196 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2197 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2198 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199 }
2200 count++;
2201 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002202 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002203 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002204 if (!net_rsp)
2205 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002206 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002207 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002208 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002209 if (atomic_read(&tx_ring->queue_stopped) &&
2210 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2211 /*
2212 * The queue got stopped because the tx_ring was full.
2213 * Wake it up, because it's now at least 25% empty.
2214 */
Ron Mercer1e213302009-03-09 10:59:21 +00002215 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002216 }
2217
2218 return count;
2219}
2220
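/* Service up to "budget" inbound completions (MAC frames and async
 * events), then replenish the buffer queues and advance the consumer
 * index register.
 */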
2221static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2222{
2223 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002224 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002225 struct ql_net_rsp_iocb *net_rsp;
2226 int count = 0;
2227
2228 /* While there are entries in the completion queue. */
2229 while (prod != rx_ring->cnsmr_idx) {
2230
Joe Perchesae9540f72010-02-09 11:49:52 +00002231 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2232			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2233 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002234
2235 net_rsp = rx_ring->curr_entry;
2236 rmb();
2237 switch (net_rsp->opcode) {
2238 case OPCODE_IB_MAC_IOCB:
2239 ql_process_mac_rx_intr(qdev, rx_ring,
2240 (struct ib_mac_iocb_rsp *)
2241 net_rsp);
2242 break;
2243
2244 case OPCODE_IB_AE_IOCB:
2245 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2246 net_rsp);
2247 break;
2248 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002249 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2250 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2251 net_rsp->opcode);
2252 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002253 }
2254 count++;
2255 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002256 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002257 if (count == budget)
2258 break;
2259 }
2260 ql_update_buffer_queues(qdev, rx_ring);
2261 ql_write_cq_idx(rx_ring);
2262 return count;
2263}
2264
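/* NAPI poll routine. Service any TX completion rings covered by this
 * vector's irq_mask first, then clean the RSS (inbound) ring itself up
 * to the budget.
 */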
2265static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2266{
2267 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2268 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002269 struct rx_ring *trx_ring;
2270 int i, work_done = 0;
2271 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002272
Joe Perchesae9540f72010-02-09 11:49:52 +00002273 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2274 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002275
Ron Mercer39aa8162009-08-27 11:02:11 +00002276 /* Service the TX rings first. They start
2277 * right after the RSS rings. */
2278 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2279 trx_ring = &qdev->rx_ring[i];
2280 /* If this TX completion ring belongs to this vector and
2281 * it's not empty then service it.
2282 */
2283 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2284 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2285 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002286 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2287 "%s: Servicing TX completion ring %d.\n",
2288 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002289 ql_clean_outbound_rx_ring(trx_ring);
2290 }
2291 }
2292
2293 /*
2294 * Now service the RSS ring if it's active.
2295 */
2296 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2297 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002298 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2299 "%s: Servicing RX completion ring %d.\n",
2300 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002301 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2302 }
2303
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002304 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002305 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002306 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2307 }
2308 return work_done;
2309}
2310
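/* Enable or disable hardware VLAN handling in NIC_RCV_CFG according to
 * the NETIF_F_HW_VLAN_RX feature flag.
 */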
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002311static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002312{
2313 struct ql_adapter *qdev = netdev_priv(ndev);
2314
Jiri Pirko18c49b92011-07-21 03:24:11 +00002315 if (features & NETIF_F_HW_VLAN_RX) {
2316 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00002317 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002318 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002319 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002320 } else {
Jiri Pirko18c49b92011-07-21 03:24:11 +00002321 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00002322 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002323 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2324 }
2325}
2326
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002327static netdev_features_t qlge_fix_features(struct net_device *ndev,
2328 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002329{
2330 /*
2331 * Since there is no support for separate rx/tx vlan accel
2332	 * enable/disable, make sure the tx flag is always in the same state as rx.
2333 */
2334 if (features & NETIF_F_HW_VLAN_RX)
2335 features |= NETIF_F_HW_VLAN_TX;
2336 else
2337 features &= ~NETIF_F_HW_VLAN_TX;
2338
2339 return features;
2340}
2341
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002342static int qlge_set_features(struct net_device *ndev,
2343 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002344{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002345 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002346
2347 if (changed & NETIF_F_HW_VLAN_RX)
2348 qlge_vlan_mode(ndev, features);
2349
2350 return 0;
2351}
2352
Jiri Pirko8e586132011-12-08 19:52:37 -05002353static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002354{
2355 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002356 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002357
Jiri Pirko8e586132011-12-08 19:52:37 -05002358 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2359 MAC_ADDR_TYPE_VLAN, vid);
2360 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002361 netif_err(qdev, ifup, qdev->ndev,
2362 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002363 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002364}
2365
Jiri Pirko8e586132011-12-08 19:52:37 -05002366static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002367{
2368 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002369 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002370 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002371
Ron Mercercc288f52009-02-23 10:42:14 +00002372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002374 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002375
Jiri Pirko8e586132011-12-08 19:52:37 -05002376 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002377 set_bit(vid, qdev->active_vlans);
2378
Ron Mercercc288f52009-02-23 10:42:14 +00002379 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002380
2381 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002382}
2383
Jiri Pirko8e586132011-12-08 19:52:37 -05002384static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002385{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002386 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002387 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002388
Jiri Pirko8e586132011-12-08 19:52:37 -05002389 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2390 MAC_ADDR_TYPE_VLAN, vid);
2391 if (err)
Joe Perchesae9540f72010-02-09 11:49:52 +00002392 netif_err(qdev, ifup, qdev->ndev,
2393 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002394 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002395}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002396
Jiri Pirko8e586132011-12-08 19:52:37 -05002397static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002398{
2399 struct ql_adapter *qdev = netdev_priv(ndev);
2400 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002401 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002402
2403 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2404 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002405 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002406
Jiri Pirko8e586132011-12-08 19:52:37 -05002407 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002408 clear_bit(vid, qdev->active_vlans);
2409
2410 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002411
2412 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002413}
2414
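/* Re-program the hardware VLAN filter with every VID still set in
 * active_vlans (e.g. after the adapter has been reset).
 */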
Ron Mercerc1b60092010-10-27 04:58:12 +00002415static void qlge_restore_vlan(struct ql_adapter *qdev)
2416{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002417 int status;
2418 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002419
Jiri Pirko18c49b92011-07-21 03:24:11 +00002420 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2421 if (status)
2422 return;
2423
2424 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2425 __qlge_vlan_rx_add_vid(qdev, vid);
2426
2427 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002428}
2429
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002430/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2431static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2432{
2433 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002434 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002435 return IRQ_HANDLED;
2436}
2437
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002438/* This handles a fatal error, MPI activity, and the default
2439 * rx_ring in an MSI-X multiple vector environment.
2440 * In MSI/Legacy environment it also process the rest of
2441 * the rx_rings.
2442 */
2443static irqreturn_t qlge_isr(int irq, void *dev_id)
2444{
2445 struct rx_ring *rx_ring = dev_id;
2446 struct ql_adapter *qdev = rx_ring->qdev;
2447 struct intr_context *intr_context = &qdev->intr_context[0];
2448 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002449 int work_done = 0;
2450
Ron Mercerbb0d2152008-10-20 10:30:26 -07002451 spin_lock(&qdev->hw_lock);
2452 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002453 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2454 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002455 spin_unlock(&qdev->hw_lock);
2456 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002457 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002458 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002459
Ron Mercerbb0d2152008-10-20 10:30:26 -07002460 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002461
2462 /*
2463 * Check for fatal error.
2464 */
2465 if (var & STS_FE) {
2466 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002467 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002468 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002469 netdev_err(qdev->ndev, "Resetting chip. "
2470 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002471 return IRQ_HANDLED;
2472 }
2473
2474 /*
2475 * Check MPI processor activity.
2476 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002477 if ((var & STS_PI) &&
2478 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002479 /*
2480 * We've got an async event or mailbox completion.
2481 * Handle it and clear the source of the interrupt.
2482 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002483 netif_err(qdev, intr, qdev->ndev,
2484 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002485 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002486 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2487 queue_delayed_work_on(smp_processor_id(),
2488 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002489 work_done++;
2490 }
2491
2492 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002493 * Get the bit-mask that shows the active queues for this
2494 * pass. Compare it to the queues that this irq services
2495 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002496 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002497 var = ql_read32(qdev, ISR1);
2498 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002499 netif_info(qdev, intr, qdev->ndev,
2500 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002501 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002502 napi_schedule(&rx_ring->napi);
2503 work_done++;
2504 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002505 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002506 return work_done ? IRQ_HANDLED : IRQ_NONE;
2507}
2508
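/* If the skb is GSO, fill in the TSO fields of the IOCB (header lengths,
 * MSS, seeded pseudo-header checksum) and return 1. Returns 0 for
 * non-GSO skbs and a negative errno if the cloned header can't be
 * expanded.
 */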
2509static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2510{
2511
2512 if (skb_is_gso(skb)) {
2513 int err;
2514 if (skb_header_cloned(skb)) {
2515 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2516 if (err)
2517 return err;
2518 }
2519
2520 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2521 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2522 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2523 mac_iocb_ptr->total_hdrs_len =
2524 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2525 mac_iocb_ptr->net_trans_offset =
2526 cpu_to_le16(skb_network_offset(skb) |
2527 skb_transport_offset(skb)
2528 << OB_MAC_TRANSPORT_HDR_SHIFT);
2529 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2530 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2531 if (likely(skb->protocol == htons(ETH_P_IP))) {
2532 struct iphdr *iph = ip_hdr(skb);
2533 iph->check = 0;
2534 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2535 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2536 iph->daddr, 0,
2537 IPPROTO_TCP,
2538 0);
2539 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2540 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2541 tcp_hdr(skb)->check =
2542 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2543 &ipv6_hdr(skb)->daddr,
2544 0, IPPROTO_TCP, 0);
2545 }
2546 return 1;
2547 }
2548 return 0;
2549}
2550
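/* Set up IPv4 TCP/UDP transmit checksum offload: record the header
 * offsets in the IOCB and seed the transport checksum field with the
 * pseudo-header checksum.
 */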
2551static void ql_hw_csum_setup(struct sk_buff *skb,
2552 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2553{
2554 int len;
2555 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002556 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2558 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2559 mac_iocb_ptr->net_trans_offset =
2560 cpu_to_le16(skb_network_offset(skb) |
2561 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2562
2563 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2564 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2565 if (likely(iph->protocol == IPPROTO_TCP)) {
2566 check = &(tcp_hdr(skb)->check);
2567 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2568 mac_iocb_ptr->total_hdrs_len =
2569 cpu_to_le16(skb_transport_offset(skb) +
2570 (tcp_hdr(skb)->doff << 2));
2571 } else {
2572 check = &(udp_hdr(skb)->check);
2573 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2574 mac_iocb_ptr->total_hdrs_len =
2575 cpu_to_le16(skb_transport_offset(skb) +
2576 sizeof(struct udphdr));
2577 }
2578 *check = ~csum_tcpudp_magic(iph->saddr,
2579 iph->daddr, len, iph->protocol, 0);
2580}
2581
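/* Transmit entry point. Builds a MAC IOCB (with optional VLAN tag, TSO
 * or checksum offload), maps the skb for DMA and rings the doorbell for
 * the selected TX ring.
 */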
Stephen Hemminger613573252009-08-31 19:50:58 +00002582static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002583{
2584 struct tx_ring_desc *tx_ring_desc;
2585 struct ob_mac_iocb_req *mac_iocb_ptr;
2586 struct ql_adapter *qdev = netdev_priv(ndev);
2587 int tso;
2588 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002589 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002590
2591 tx_ring = &qdev->tx_ring[tx_ring_idx];
2592
Ron Mercer74c50b42009-03-09 10:59:27 +00002593 if (skb_padto(skb, ETH_ZLEN))
2594 return NETDEV_TX_OK;
2595
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002596 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002597 netif_info(qdev, tx_queued, qdev->ndev,
2598			   "%s: shutting down tx queue %d due to lack of resources.\n",
2599 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002600 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002601 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002602 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002603 return NETDEV_TX_BUSY;
2604 }
2605 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2606 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002607 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002608
2609 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2610 mac_iocb_ptr->tid = tx_ring_desc->index;
2611 /* We use the upper 32-bits to store the tx queue for this IO.
2612 * When we get the completion we can use it to establish the context.
2613 */
2614 mac_iocb_ptr->txq_idx = tx_ring_idx;
2615 tx_ring_desc->skb = skb;
2616
2617 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2618
Jesse Grosseab6d182010-10-20 13:56:03 +00002619 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002620 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2621 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002622 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2623 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2624 }
2625 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2626 if (tso < 0) {
2627 dev_kfree_skb_any(skb);
2628 return NETDEV_TX_OK;
2629 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2630 ql_hw_csum_setup(skb,
2631 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2632 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002633 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2634 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002635 netif_err(qdev, tx_queued, qdev->ndev,
2636 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002637 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002638 return NETDEV_TX_BUSY;
2639 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002640 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2641 tx_ring->prod_idx++;
2642 if (tx_ring->prod_idx == tx_ring->wq_len)
2643 tx_ring->prod_idx = 0;
2644 wmb();
2645
2646 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002647 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2648 "tx queued, slot %d, len %d\n",
2649 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002650
2651 atomic_dec(&tx_ring->tx_count);
2652 return NETDEV_TX_OK;
2653}
2654
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002655
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002656static void ql_free_shadow_space(struct ql_adapter *qdev)
2657{
2658 if (qdev->rx_ring_shadow_reg_area) {
2659 pci_free_consistent(qdev->pdev,
2660 PAGE_SIZE,
2661 qdev->rx_ring_shadow_reg_area,
2662 qdev->rx_ring_shadow_reg_dma);
2663 qdev->rx_ring_shadow_reg_area = NULL;
2664 }
2665 if (qdev->tx_ring_shadow_reg_area) {
2666 pci_free_consistent(qdev->pdev,
2667 PAGE_SIZE,
2668 qdev->tx_ring_shadow_reg_area,
2669 qdev->tx_ring_shadow_reg_dma);
2670 qdev->tx_ring_shadow_reg_area = NULL;
2671 }
2672}
2673
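/* Allocate one page each of DMA-coherent memory for the RX and TX
 * shadow register areas used for hardware-updated ring indices.
 */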
2674static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2675{
2676 qdev->rx_ring_shadow_reg_area =
2677 pci_alloc_consistent(qdev->pdev,
2678 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2679 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002680 netif_err(qdev, ifup, qdev->ndev,
2681 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002682 return -ENOMEM;
2683 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002684 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002685 qdev->tx_ring_shadow_reg_area =
2686 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2687 &qdev->tx_ring_shadow_reg_dma);
2688 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002689 netif_err(qdev, ifup, qdev->ndev,
2690 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002691 goto err_wqp_sh_area;
2692 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002693 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002694 return 0;
2695
2696err_wqp_sh_area:
2697 pci_free_consistent(qdev->pdev,
2698 PAGE_SIZE,
2699 qdev->rx_ring_shadow_reg_area,
2700 qdev->rx_ring_shadow_reg_dma);
2701 return -ENOMEM;
2702}
2703
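/* Link each tx_ring_desc to its IOCB slot in the work queue and reset
 * the ring's free-entry count.
 */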
2704static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2705{
2706 struct tx_ring_desc *tx_ring_desc;
2707 int i;
2708 struct ob_mac_iocb_req *mac_iocb_ptr;
2709
2710 mac_iocb_ptr = tx_ring->wq_base;
2711 tx_ring_desc = tx_ring->q;
2712 for (i = 0; i < tx_ring->wq_len; i++) {
2713 tx_ring_desc->index = i;
2714 tx_ring_desc->skb = NULL;
2715 tx_ring_desc->queue_entry = mac_iocb_ptr;
2716 mac_iocb_ptr++;
2717 tx_ring_desc++;
2718 }
2719 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2720 atomic_set(&tx_ring->queue_stopped, 0);
2721}
2722
2723static void ql_free_tx_resources(struct ql_adapter *qdev,
2724 struct tx_ring *tx_ring)
2725{
2726 if (tx_ring->wq_base) {
2727 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2728 tx_ring->wq_base, tx_ring->wq_base_dma);
2729 tx_ring->wq_base = NULL;
2730 }
2731 kfree(tx_ring->q);
2732 tx_ring->q = NULL;
2733}
2734
2735static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2736 struct tx_ring *tx_ring)
2737{
2738 tx_ring->wq_base =
2739 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2740 &tx_ring->wq_base_dma);
2741
Joe Perches8e95a202009-12-03 07:58:21 +00002742 if ((tx_ring->wq_base == NULL) ||
2743 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002744 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002745 return -ENOMEM;
2746 }
2747 tx_ring->q =
2748 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2749 if (tx_ring->q == NULL)
2750 goto err;
2751
2752 return 0;
2753err:
2754 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2755 tx_ring->wq_base, tx_ring->wq_base_dma);
2756 return -ENOMEM;
2757}
2758
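/* Walk the outstanding large buffer descriptors from lbq_curr_idx to
 * lbq_clean_idx, unmapping the page chunk block on its last chunk and
 * dropping each page reference.
 */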
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002759static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002760{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002761 struct bq_desc *lbq_desc;
2762
Ron Mercer7c734352009-10-19 03:32:19 +00002763 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002764
Ron Mercer7c734352009-10-19 03:32:19 +00002765 curr_idx = rx_ring->lbq_curr_idx;
2766 clean_idx = rx_ring->lbq_clean_idx;
2767 while (curr_idx != clean_idx) {
2768 lbq_desc = &rx_ring->lbq[curr_idx];
2769
2770 if (lbq_desc->p.pg_chunk.last_flag) {
2771 pci_unmap_page(qdev->pdev,
2772 lbq_desc->p.pg_chunk.map,
2773 ql_lbq_block_size(qdev),
2774 PCI_DMA_FROMDEVICE);
2775 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002776 }
Ron Mercer7c734352009-10-19 03:32:19 +00002777
2778 put_page(lbq_desc->p.pg_chunk.page);
2779 lbq_desc->p.pg_chunk.page = NULL;
2780
2781 if (++curr_idx == rx_ring->lbq_len)
2782 curr_idx = 0;
2783
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002784 }
2785}
2786
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002787static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002788{
2789 int i;
2790 struct bq_desc *sbq_desc;
2791
2792 for (i = 0; i < rx_ring->sbq_len; i++) {
2793 sbq_desc = &rx_ring->sbq[i];
2794 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002795 netif_err(qdev, ifup, qdev->ndev,
2796 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002797 return;
2798 }
2799 if (sbq_desc->p.skb) {
2800 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002801 dma_unmap_addr(sbq_desc, mapaddr),
2802 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002803 PCI_DMA_FROMDEVICE);
2804 dev_kfree_skb(sbq_desc->p.skb);
2805 sbq_desc->p.skb = NULL;
2806 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002807 }
2808}
2809
Ron Mercer4545a3f2009-02-23 10:42:17 +00002810/* Free all large and small rx buffers associated
2811 * with the completion queues for this device.
2812 */
2813static void ql_free_rx_buffers(struct ql_adapter *qdev)
2814{
2815 int i;
2816 struct rx_ring *rx_ring;
2817
2818 for (i = 0; i < qdev->rx_ring_count; i++) {
2819 rx_ring = &qdev->rx_ring[i];
2820 if (rx_ring->lbq)
2821 ql_free_lbq_buffers(qdev, rx_ring);
2822 if (rx_ring->sbq)
2823 ql_free_sbq_buffers(qdev, rx_ring);
2824 }
2825}
2826
2827static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2828{
2829 struct rx_ring *rx_ring;
2830 int i;
2831
2832 for (i = 0; i < qdev->rx_ring_count; i++) {
2833 rx_ring = &qdev->rx_ring[i];
2834 if (rx_ring->type != TX_Q)
2835 ql_update_buffer_queues(qdev, rx_ring);
2836 }
2837}
2838
2839static void ql_init_lbq_ring(struct ql_adapter *qdev,
2840 struct rx_ring *rx_ring)
2841{
2842 int i;
2843 struct bq_desc *lbq_desc;
2844 __le64 *bq = rx_ring->lbq_base;
2845
2846 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2847 for (i = 0; i < rx_ring->lbq_len; i++) {
2848 lbq_desc = &rx_ring->lbq[i];
2849 memset(lbq_desc, 0, sizeof(*lbq_desc));
2850 lbq_desc->index = i;
2851 lbq_desc->addr = bq;
2852 bq++;
2853 }
2854}
2855
2856static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002857 struct rx_ring *rx_ring)
2858{
2859 int i;
2860 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002861 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002862
Ron Mercer4545a3f2009-02-23 10:42:17 +00002863 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002864 for (i = 0; i < rx_ring->sbq_len; i++) {
2865 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002866 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002867 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002868 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002869 bq++;
2870 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002871}
2872
2873static void ql_free_rx_resources(struct ql_adapter *qdev,
2874 struct rx_ring *rx_ring)
2875{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002876 /* Free the small buffer queue. */
2877 if (rx_ring->sbq_base) {
2878 pci_free_consistent(qdev->pdev,
2879 rx_ring->sbq_size,
2880 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2881 rx_ring->sbq_base = NULL;
2882 }
2883
2884 /* Free the small buffer queue control blocks. */
2885 kfree(rx_ring->sbq);
2886 rx_ring->sbq = NULL;
2887
2888 /* Free the large buffer queue. */
2889 if (rx_ring->lbq_base) {
2890 pci_free_consistent(qdev->pdev,
2891 rx_ring->lbq_size,
2892 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2893 rx_ring->lbq_base = NULL;
2894 }
2895
2896 /* Free the large buffer queue control blocks. */
2897 kfree(rx_ring->lbq);
2898 rx_ring->lbq = NULL;
2899
2900 /* Free the rx queue. */
2901 if (rx_ring->cq_base) {
2902 pci_free_consistent(qdev->pdev,
2903 rx_ring->cq_size,
2904 rx_ring->cq_base, rx_ring->cq_base_dma);
2905 rx_ring->cq_base = NULL;
2906 }
2907}
2908
2909/* Allocate queues and buffers for this completion queue based
2910 * on the values in the parameter structure. */
2911static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2912 struct rx_ring *rx_ring)
2913{
2914
2915 /*
2916 * Allocate the completion queue for this rx_ring.
2917 */
2918 rx_ring->cq_base =
2919 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2920 &rx_ring->cq_base_dma);
2921
2922 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002923 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002924 return -ENOMEM;
2925 }
2926
2927 if (rx_ring->sbq_len) {
2928 /*
2929 * Allocate small buffer queue.
2930 */
2931 rx_ring->sbq_base =
2932 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2933 &rx_ring->sbq_base_dma);
2934
2935 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002936 netif_err(qdev, ifup, qdev->ndev,
2937 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002938 goto err_mem;
2939 }
2940
2941 /*
2942 * Allocate small buffer queue control blocks.
2943 */
2944 rx_ring->sbq =
2945 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2946 GFP_KERNEL);
2947 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002948 netif_err(qdev, ifup, qdev->ndev,
2949 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002950 goto err_mem;
2951 }
2952
Ron Mercer4545a3f2009-02-23 10:42:17 +00002953 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002954 }
2955
2956 if (rx_ring->lbq_len) {
2957 /*
2958 * Allocate large buffer queue.
2959 */
2960 rx_ring->lbq_base =
2961 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2962 &rx_ring->lbq_base_dma);
2963
2964 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002965 netif_err(qdev, ifup, qdev->ndev,
2966 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002967 goto err_mem;
2968 }
2969 /*
2970 * Allocate large buffer queue control blocks.
2971 */
2972 rx_ring->lbq =
2973 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2974 GFP_KERNEL);
2975 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002976 netif_err(qdev, ifup, qdev->ndev,
2977 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002978 goto err_mem;
2979 }
2980
Ron Mercer4545a3f2009-02-23 10:42:17 +00002981 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002982 }
2983
2984 return 0;
2985
2986err_mem:
2987 ql_free_rx_resources(qdev, rx_ring);
2988 return -ENOMEM;
2989}
2990
2991static void ql_tx_ring_clean(struct ql_adapter *qdev)
2992{
2993 struct tx_ring *tx_ring;
2994 struct tx_ring_desc *tx_ring_desc;
2995 int i, j;
2996
2997 /*
2998 * Loop through all queues and free
2999 * any resources.
3000 */
3001 for (j = 0; j < qdev->tx_ring_count; j++) {
3002 tx_ring = &qdev->tx_ring[j];
3003 for (i = 0; i < tx_ring->wq_len; i++) {
3004 tx_ring_desc = &tx_ring->q[i];
3005 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003006 netif_err(qdev, ifdown, qdev->ndev,
3007 "Freeing lost SKB %p, from queue %d, index %d.\n",
3008 tx_ring_desc->skb, j,
3009 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003010 ql_unmap_send(qdev, tx_ring_desc,
3011 tx_ring_desc->map_cnt);
3012 dev_kfree_skb(tx_ring_desc->skb);
3013 tx_ring_desc->skb = NULL;
3014 }
3015 }
3016 }
3017}
3018
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003019static void ql_free_mem_resources(struct ql_adapter *qdev)
3020{
3021 int i;
3022
3023 for (i = 0; i < qdev->tx_ring_count; i++)
3024 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3025 for (i = 0; i < qdev->rx_ring_count; i++)
3026 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3027 ql_free_shadow_space(qdev);
3028}
3029
3030static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3031{
3032 int i;
3033
3034 /* Allocate space for our shadow registers and such. */
3035 if (ql_alloc_shadow_space(qdev))
3036 return -ENOMEM;
3037
3038 for (i = 0; i < qdev->rx_ring_count; i++) {
3039 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003040 netif_err(qdev, ifup, qdev->ndev,
3041 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003042 goto err_mem;
3043 }
3044 }
3045 /* Allocate tx queue resources */
3046 for (i = 0; i < qdev->tx_ring_count; i++) {
3047 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003048 netif_err(qdev, ifup, qdev->ndev,
3049 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003050 goto err_mem;
3051 }
3052 }
3053 return 0;
3054
3055err_mem:
3056 ql_free_mem_resources(qdev);
3057 return -ENOMEM;
3058}
3059
3060/* Set up the rx ring control block and pass it to the chip.
3061 * The control block is defined as
3062 * "Completion Queue Initialization Control Block", or cqicb.
3063 */
3064static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3065{
3066 struct cqicb *cqicb = &rx_ring->cqicb;
3067 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003068 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003069 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003070 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003071 void __iomem *doorbell_area =
3072 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3073 int err = 0;
3074 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003075 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003076 __le64 *base_indirect_ptr;
3077 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003078
3079 /* Set up the shadow registers for this ring. */
3080 rx_ring->prod_idx_sh_reg = shadow_reg;
3081 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003082 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003083 shadow_reg += sizeof(u64);
3084 shadow_reg_dma += sizeof(u64);
3085 rx_ring->lbq_base_indirect = shadow_reg;
3086 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003087 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3088 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003089 rx_ring->sbq_base_indirect = shadow_reg;
3090 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3091
3092 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003093 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003094 rx_ring->cnsmr_idx = 0;
3095 rx_ring->curr_entry = rx_ring->cq_base;
3096
3097 /* PCI doorbell mem area + 0x04 for valid register */
3098 rx_ring->valid_db_reg = doorbell_area + 0x04;
3099
3100 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003101 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003102
3103 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003104 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003105
3106 memset((void *)cqicb, 0, sizeof(struct cqicb));
3107 cqicb->msix_vect = rx_ring->irq;
3108
Ron Mercer459caf52009-01-04 17:08:11 -08003109 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3110 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003111
Ron Mercer97345522009-01-09 11:31:50 +00003112 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003113
Ron Mercer97345522009-01-09 11:31:50 +00003114 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003115
3116 /*
3117 * Set up the control block load flags.
3118 */
3119 cqicb->flags = FLAGS_LC | /* Load queue base address */
3120 FLAGS_LV | /* Load MSI-X vector */
3121 FLAGS_LI; /* Load irq delay values */
3122 if (rx_ring->lbq_len) {
3123 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003124 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003125 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003126 page_entries = 0;
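		/* List the DMA address of each doorbell-page-sized piece of
		 * the large buffer queue in the indirection area.
		 */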
3127 do {
3128 *base_indirect_ptr = cpu_to_le64(tmp);
3129 tmp += DB_PAGE_SIZE;
3130 base_indirect_ptr++;
3131 page_entries++;
3132 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003133 cqicb->lbq_addr =
3134 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003135 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3136 (u16) rx_ring->lbq_buf_size;
3137 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3138 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3139 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003140 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003141 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003143 rx_ring->lbq_clean_idx = 0;
3144 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003145 }
3146 if (rx_ring->sbq_len) {
3147 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003148 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003149 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003150 page_entries = 0;
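		/* Likewise list each doorbell-page-sized piece of the small
		 * buffer queue.
		 */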
3151 do {
3152 *base_indirect_ptr = cpu_to_le64(tmp);
3153 tmp += DB_PAGE_SIZE;
3154 base_indirect_ptr++;
3155 page_entries++;
3156 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003157 cqicb->sbq_addr =
3158 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003159 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003160 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003161 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3162 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003163 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003164 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003165 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003166 rx_ring->sbq_clean_idx = 0;
3167 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003168 }
3169 switch (rx_ring->type) {
3170 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003171 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3172 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3173 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003174 case RX_Q:
3175 /* Inbound completion handling rx_rings run in
3176 * separate NAPI contexts.
3177 */
3178 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3179 64);
3180 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3181 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3182 break;
3183 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003184 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3185 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003186 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003187 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3188 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003189 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3190 CFG_LCQ, rx_ring->cq_id);
3191 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003192 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003193 return err;
3194 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003195 return err;
3196}
3197
3198static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3199{
3200 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3201 void __iomem *doorbell_area =
3202 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3203 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3204 (tx_ring->wq_id * sizeof(u64));
3205 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3206 (tx_ring->wq_id * sizeof(u64));
3207 int err = 0;
3208
3209 /*
3210 * Assign doorbell registers for this tx_ring.
3211 */
3212 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003213 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003214 tx_ring->prod_idx = 0;
3215 /* TX PCI doorbell mem area + 0x04 */
3216 tx_ring->valid_db_reg = doorbell_area + 0x04;
3217
3218 /*
3219 * Assign shadow registers for this tx_ring.
3220 */
3221 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3222 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3223
3224 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3225 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3226 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3227 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3228 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003229 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230
Ron Mercer97345522009-01-09 11:31:50 +00003231 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003232
3233 ql_init_tx_ring(qdev, tx_ring);
3234
Ron Mercere3324712009-07-02 06:06:13 +00003235 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003236 (u16) tx_ring->wq_id);
3237 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003238 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003239 return err;
3240 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003241 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3242 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003243 return err;
3244}
3245
3246static void ql_disable_msix(struct ql_adapter *qdev)
3247{
3248 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3249 pci_disable_msix(qdev->pdev);
3250 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3251 kfree(qdev->msi_x_entry);
3252 qdev->msi_x_entry = NULL;
3253 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3254 pci_disable_msi(qdev->pdev);
3255 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3256 }
3257}
3258
Ron Mercera4ab6132009-08-27 11:02:10 +00003259/* We start by trying to get the number of vectors
3260 * stored in qdev->intr_count. If we don't get that
3261 * many then we reduce the count and try again.
3262 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003263static void ql_enable_msix(struct ql_adapter *qdev)
3264{
Ron Mercera4ab6132009-08-27 11:02:10 +00003265 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003266
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003267 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003268 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003269 /* Try to alloc space for the msix struct,
3270 * if it fails then go to MSI/legacy.
3271 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003272 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003273 sizeof(struct msix_entry),
3274 GFP_KERNEL);
3275 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003276 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003277 goto msi;
3278 }
3279
Ron Mercera4ab6132009-08-27 11:02:10 +00003280 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003281 qdev->msi_x_entry[i].entry = i;
3282
Ron Mercera4ab6132009-08-27 11:02:10 +00003283 /* Loop to get our vectors. We start with
3284 * what we want and settle for what we get.
3285 */
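		/* pci_enable_msix() returns the number of vectors available
		 * when the request cannot be met, so shrink intr_count to
		 * that value and retry.
		 */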
3286 do {
3287 err = pci_enable_msix(qdev->pdev,
3288 qdev->msi_x_entry, qdev->intr_count);
3289 if (err > 0)
3290 qdev->intr_count = err;
3291 } while (err > 0);
3292
3293 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003294 kfree(qdev->msi_x_entry);
3295 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003296 netif_warn(qdev, ifup, qdev->ndev,
3297 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003298 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003299 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003300 } else if (err == 0) {
3301 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003302 netif_info(qdev, ifup, qdev->ndev,
3303 "MSI-X Enabled, got %d vectors.\n",
3304 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003305 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003306 }
3307 }
3308msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003309 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003310 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003311 if (!pci_enable_msi(qdev->pdev)) {
3312 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003313 netif_info(qdev, ifup, qdev->ndev,
3314 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003315 return;
3316 }
3317 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003318 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003319 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3320 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003321}
3322
Ron Mercer39aa8162009-08-27 11:02:11 +00003323/* Each vector services 1 RSS ring and 1 or more
3324 * TX completion rings. This function loops through
3325 * the TX completion rings and assigns the vector that
3326 * will service it. An example would be if there are
3327 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3328 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003329 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003330 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3331 */
3332static void ql_set_tx_vect(struct ql_adapter *qdev)
3333{
3334 int i, j, vect;
3335 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3336
3337 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3338 /* Assign irq vectors to TX rx_rings.*/
3339 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3340 i < qdev->rx_ring_count; i++) {
3341 if (j == tx_rings_per_vector) {
3342 vect++;
3343 j = 0;
3344 }
3345 qdev->rx_ring[i].irq = vect;
3346 j++;
3347 }
3348 } else {
3349		/* For a single vector all rings have an irq
3350 * of zero.
3351 */
3352 for (i = 0; i < qdev->rx_ring_count; i++)
3353 qdev->rx_ring[i].irq = 0;
3354 }
3355}
3356
3357/* Set the interrupt mask for this vector. Each vector
3358 * will service 1 RSS ring and 1 or more TX completion
3359 * rings. This function sets up a bit mask per vector
3360 * that indicates which rings it services.
3361 */
3362static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3363{
3364 int j, vect = ctx->intr;
3365 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3366
3367 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3368 /* Add the RSS ring serviced by this vector
3369 * to the mask.
3370 */
3371 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3372 /* Add the TX ring(s) serviced by this vector
3373 * to the mask. */
3374 for (j = 0; j < tx_rings_per_vector; j++) {
3375 ctx->irq_mask |=
3376 (1 << qdev->rx_ring[qdev->rss_ring_count +
3377 (vect * tx_rings_per_vector) + j].cq_id);
3378 }
3379 } else {
3380		/* For a single vector we just shift each queue's
3381 * ID into the mask.
3382 */
3383 for (j = 0; j < qdev->rx_ring_count; j++)
3384 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3385 }
3386}
3387
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003388/*
3389 * Here we build the intr_context structures based on
3390 * our rx_ring count and intr vector count.
3391 * The intr_context structure is used to hook each vector
3392 * to possibly different handlers.
3393 */
3394static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3395{
3396 int i = 0;
3397 struct intr_context *intr_context = &qdev->intr_context[0];
3398
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003399 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3400		/* Each rx_ring has its
3401 * own intr_context since we have separate
3402 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003403 */
3404 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3405 qdev->rx_ring[i].irq = i;
3406 intr_context->intr = i;
3407 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003408 /* Set up this vector's bit-mask that indicates
3409 * which queues it services.
3410 */
3411 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003412 /*
3413			 * We set up each vector's enable/disable/read bits so
3414			 * there are no bit/mask calculations in the critical path.
3415 */
3416 intr_context->intr_en_mask =
3417 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3418 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3419 | i;
3420 intr_context->intr_dis_mask =
3421 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3422 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3423 INTR_EN_IHD | i;
3424 intr_context->intr_read_mask =
3425 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3426 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3427 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003428 if (i == 0) {
3429 /* The first vector/queue handles
3430 * broadcast/multicast, fatal errors,
3431				 * and firmware events. This is in addition
3432 * to normal inbound NAPI processing.
3433 */
3434 intr_context->handler = qlge_isr;
3435 sprintf(intr_context->name, "%s-rx-%d",
3436 qdev->ndev->name, i);
3437 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003438 /*
3439 * Inbound queues handle unicast frames only.
3440 */
3441 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003442 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003443 qdev->ndev->name, i);
3444 }
3445 }
3446 } else {
3447 /*
3448 * All rx_rings use the same intr_context since
3449 * there is only one vector.
3450 */
3451 intr_context->intr = 0;
3452 intr_context->qdev = qdev;
3453 /*
3454		 * We set up each vector's enable/disable/read bits so
3455		 * there are no bit/mask calculations in the critical path.
3456 */
3457 intr_context->intr_en_mask =
3458 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3459 intr_context->intr_dis_mask =
3460 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3461 INTR_EN_TYPE_DISABLE;
3462 intr_context->intr_read_mask =
3463 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3464 /*
3465 * Single interrupt means one handler for all rings.
3466 */
3467 intr_context->handler = qlge_isr;
3468 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003469 /* Set up this vector's bit-mask that indicates
3470 * which queues it services. In this case there is
3471 * a single vector so it will service all RSS and
3472 * TX completion rings.
3473 */
3474 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003475 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003476 /* Tell the TX completion rings which MSIx vector
3477 * they will be using.
3478 */
3479 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003480}
3481
3482static void ql_free_irq(struct ql_adapter *qdev)
3483{
3484 int i;
3485 struct intr_context *intr_context = &qdev->intr_context[0];
3486
3487 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3488 if (intr_context->hooked) {
3489 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3490 free_irq(qdev->msi_x_entry[i].vector,
3491 &qdev->rx_ring[i]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003492 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3493 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003494 } else {
3495 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f72010-02-09 11:49:52 +00003496 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3497 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003498 }
3499 }
3500 }
3501 ql_disable_msix(qdev);
3502}
3503
3504static int ql_request_irq(struct ql_adapter *qdev)
3505{
3506 int i;
3507 int status = 0;
3508 struct pci_dev *pdev = qdev->pdev;
3509 struct intr_context *intr_context = &qdev->intr_context[0];
3510
3511 ql_resolve_queues_to_irqs(qdev);
3512
3513 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3514 atomic_set(&intr_context->irq_cnt, 0);
3515 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3516 status = request_irq(qdev->msi_x_entry[i].vector,
3517 intr_context->handler,
3518 0,
3519 intr_context->name,
3520 &qdev->rx_ring[i]);
3521 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003522 netif_err(qdev, ifup, qdev->ndev,
3523 "Failed request for MSIX interrupt %d.\n",
3524 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003525 goto err_irq;
3526 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003527 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3528 "Hooked intr %d, queue type %s, with name %s.\n",
3529 i,
3530 qdev->rx_ring[i].type == DEFAULT_Q ?
3531 "DEFAULT_Q" :
3532 qdev->rx_ring[i].type == TX_Q ?
3533 "TX_Q" :
3534 qdev->rx_ring[i].type == RX_Q ?
3535 "RX_Q" : "",
3536 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003537 }
3538 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003539 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3540 "trying msi or legacy interrupts.\n");
3541 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3542 "%s: irq = %d.\n", __func__, pdev->irq);
3543 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3544 "%s: context->name = %s.\n", __func__,
3545 intr_context->name);
3546 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3547 "%s: dev_id = 0x%p.\n", __func__,
3548 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549 status =
3550 request_irq(pdev->irq, qlge_isr,
3551				    test_bit(QL_MSI_ENABLED, &qdev->flags) ? 0 : IRQF_SHARED,
3554 intr_context->name, &qdev->rx_ring[0]);
3555 if (status)
3556 goto err_irq;
3557
Joe Perchesae9540f72010-02-09 11:49:52 +00003558 netif_err(qdev, ifup, qdev->ndev,
3559 "Hooked intr %d, queue type %s, with name %s.\n",
3560 i,
3561 qdev->rx_ring[0].type == DEFAULT_Q ?
3562 "DEFAULT_Q" :
3563 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3564 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3565 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003566 }
3567 intr_context->hooked = 1;
3568 }
3569 return status;
3570err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003571	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003572 ql_free_irq(qdev);
3573 return status;
3574}
3575
3576static int ql_start_rss(struct ql_adapter *qdev)
3577{
Joe Perches215faf92010-12-21 02:16:10 -08003578 static const u8 init_hash_seed[] = {
3579 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3580 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3581 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3582 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3583 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3584 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003585 struct ricb *ricb = &qdev->ricb;
3586 int status = 0;
3587 int i;
3588 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3589
Ron Mercere3324712009-07-02 06:06:13 +00003590 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003591
Ron Mercerb2014ff2009-08-27 11:02:09 +00003592 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003593 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003594 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3595 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003596
3597 /*
3598 * Fill out the Indirection Table.
3599 */
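	/* Spread the 1024 hash buckets across the active RSS rings. */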
Ron Mercer541ae282009-10-08 09:54:37 +00003600 for (i = 0; i < 1024; i++)
3601 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003602
Ron Mercer541ae282009-10-08 09:54:37 +00003603 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3604 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003605
Joe Perchesae9540f72010-02-09 11:49:52 +00003606 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003607
Ron Mercere3324712009-07-02 06:06:13 +00003608 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003609 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003610 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003611 return status;
3612 }
Joe Perchesae9540f72010-02-09 11:49:52 +00003613 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3614 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003615 return status;
3616}
3617
Ron Mercera5f59dc2009-07-02 06:06:07 +00003618static int ql_clear_routing_entries(struct ql_adapter *qdev)
3619{
3620 int i, status = 0;
3621
3622 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3623 if (status)
3624 return status;
3625 /* Clear all the entries in the routing table. */
3626 for (i = 0; i < 16; i++) {
3627 status = ql_set_routing_reg(qdev, i, 0, 0);
3628 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003629 netif_err(qdev, ifup, qdev->ndev,
3630 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003631 break;
3632 }
3633 }
3634 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3635 return status;
3636}
3637
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003638/* Initialize the frame-to-queue routing. */
3639static int ql_route_initialize(struct ql_adapter *qdev)
3640{
3641 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003642
3643 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003644 status = ql_clear_routing_entries(qdev);
3645 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003646 return status;
3647
3648 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3649 if (status)
3650 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003651
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003652 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3653 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003654 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003655 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003656 "Failed to init routing register "
3657 "for IP CSUM error packets.\n");
3658 goto exit;
3659 }
3660 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3661 RT_IDX_TU_CSUM_ERR, 1);
3662 if (status) {
3663 netif_err(qdev, ifup, qdev->ndev,
3664 "Failed to init routing register "
3665 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003666 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003667 }
3668 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3669 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003670 netif_err(qdev, ifup, qdev->ndev,
3671 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003672 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003673 }
3674 /* If we have more than one inbound queue, then turn on RSS in the
3675 * routing block.
3676 */
3677 if (qdev->rss_ring_count > 1) {
3678 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3679 RT_IDX_RSS_MATCH, 1);
3680 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003681 netif_err(qdev, ifup, qdev->ndev,
3682 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003683 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003684 }
3685 }
3686
3687 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3688 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003689 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003690 netif_err(qdev, ifup, qdev->ndev,
3691 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003692exit:
3693 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003694 return status;
3695}
3696
Ron Mercer2ee1e272009-03-03 12:10:33 +00003697int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003698{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003699 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003700
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003701	/* Check if the link is up and use that to
3702	 * determine whether we are setting or clearing
3703 * the MAC address in the CAM.
3704 */
3705 set = ql_read32(qdev, STS);
3706 set &= qdev->port_link_up;
3707 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003708 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003709 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003710 return status;
3711 }
3712
3713 status = ql_route_initialize(qdev);
3714 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003715 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003716
3717 return status;
3718}
3719
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003720static int ql_adapter_initialize(struct ql_adapter *qdev)
3721{
3722 u32 value, mask;
3723 int i;
3724 int status = 0;
3725
3726 /*
3727 * Set up the System register to halt on errors.
3728 */
3729 value = SYS_EFE | SYS_FAE;
3730 mask = value << 16;
3731 ql_write32(qdev, SYS, mask | value);
3732
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003733 /* Set the default queue, and VLAN behavior. */
3734 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3735 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003736 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3737
3738 /* Set the MPI interrupt to enabled. */
3739 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3740
3741 /* Enable the function, set pagesize, enable error checking. */
3742 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003743 FSC_EC | FSC_VM_PAGE_4K;
3744 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003745
3746 /* Set/clear header splitting. */
3747 mask = FSC_VM_PAGESIZE_MASK |
3748 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3749 ql_write32(qdev, FSC, mask | value);
3750
Ron Mercer572c5262010-01-02 10:37:42 +00003751 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003752
Ron Mercera3b71932009-10-08 09:54:38 +00003753	/* Set RX packet routing to use the port/PCI function on which the
3754	 * packet arrived, in addition to the usual frame routing.
3755 * This is helpful on bonding where both interfaces can have
3756 * the same MAC address.
3757 */
3758 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003759 /* Reroute all packets to our Interface.
3760 * They may have been routed to MPI firmware
3761 * due to WOL.
3762 */
3763 value = ql_read32(qdev, MGMT_RCV_CFG);
3764 value &= ~MGMT_RCV_CFG_RM;
3765 mask = 0xffff0000;
3766
3767 /* Sticky reg needs clearing due to WOL. */
3768 ql_write32(qdev, MGMT_RCV_CFG, mask);
3769 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3770
3771	/* Default WOL is enabled on Mezz cards */
3772 if (qdev->pdev->subsystem_device == 0x0068 ||
3773 qdev->pdev->subsystem_device == 0x0180)
3774 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003775
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003776 /* Start up the rx queues. */
3777 for (i = 0; i < qdev->rx_ring_count; i++) {
3778 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3779 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003780 netif_err(qdev, ifup, qdev->ndev,
3781 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003782 return status;
3783 }
3784 }
3785
3786 /* If there is more than one inbound completion queue
3787 * then download a RICB to configure RSS.
3788 */
3789 if (qdev->rss_ring_count > 1) {
3790 status = ql_start_rss(qdev);
3791 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003792 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003793 return status;
3794 }
3795 }
3796
3797 /* Start up the tx queues. */
3798 for (i = 0; i < qdev->tx_ring_count; i++) {
3799 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3800 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003801 netif_err(qdev, ifup, qdev->ndev,
3802 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003803 return status;
3804 }
3805 }
3806
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003807 /* Initialize the port and set the max framesize. */
3808 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003809 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003810 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003811
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003812 /* Set up the MAC address and frame routing filter. */
3813 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003814 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003815 netif_err(qdev, ifup, qdev->ndev,
3816 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003817 return status;
3818 }
3819
3820 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003821 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003822 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3823 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003824 napi_enable(&qdev->rx_ring[i].napi);
3825 }
3826
3827 return status;
3828}
3829
3830/* Issue soft reset to chip. */
3831static int ql_adapter_reset(struct ql_adapter *qdev)
3832{
3833 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003834 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003835 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003836
Ron Mercera5f59dc2009-07-02 06:06:07 +00003837 /* Clear all the entries in the routing table. */
3838 status = ql_clear_routing_entries(qdev);
3839 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003840 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003841 return status;
3842 }
3843
3844 end_jiffies = jiffies +
3845 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003846
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003847	/* If the recovery bit is set, skip the mailbox command and
3848	 * clear the bit; otherwise we are in the normal reset process.
3849 */
3850 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3851 /* Stop management traffic. */
3852 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003853
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003854 /* Wait for the NIC and MGMNT FIFOs to empty. */
3855 ql_wait_fifo_empty(qdev);
3856 } else
3857 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003858
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003859 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003860
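	/* Poll until the function reset bit clears or the wait times out. */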
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003861 do {
3862 value = ql_read32(qdev, RST_FO);
3863 if ((value & RST_FO_FR) == 0)
3864 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003865 cpu_relax();
3866 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003867
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003868 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003869 netif_err(qdev, ifdown, qdev->ndev,
3870 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003871 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003872 }
3873
Ron Mercer84087f42009-10-08 09:54:41 +00003874 /* Resume management traffic. */
3875 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003876 return status;
3877}
3878
3879static void ql_display_dev_info(struct net_device *ndev)
3880{
Joe Perchesb16fed02010-11-15 11:12:28 +00003881 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003882
Joe Perchesae9540f72010-02-09 11:49:52 +00003883 netif_info(qdev, probe, qdev->ndev,
3884 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3885 "XG Roll = %d, XG Rev = %d.\n",
3886 qdev->func,
3887 qdev->port,
3888 qdev->chip_rev_id & 0x0000000f,
3889 qdev->chip_rev_id >> 4 & 0x0000000f,
3890 qdev->chip_rev_id >> 8 & 0x0000000f,
3891 qdev->chip_rev_id >> 12 & 0x0000000f);
3892 netif_info(qdev, probe, qdev->ndev,
3893 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003894}
3895
stephen hemmingerac409212010-10-21 07:50:54 +00003896static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003897{
3898 int status = 0;
3899 u32 wol = MB_WOL_DISABLE;
3900
3901 /* The CAM is still intact after a reset, but if we
3902 * are doing WOL, then we may need to program the
3903 * routing regs. We would also need to issue the mailbox
3904 * commands to instruct the MPI what to do per the ethtool
3905 * settings.
3906 */
3907
3908 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3909 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003910 netif_err(qdev, ifdown, qdev->ndev,
3911 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3912 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003913 return -EINVAL;
3914 }
3915
3916 if (qdev->wol & WAKE_MAGIC) {
3917 status = ql_mb_wol_set_magic(qdev, 1);
3918 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003919 netif_err(qdev, ifdown, qdev->ndev,
3920 "Failed to set magic packet on %s.\n",
3921 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003922 return status;
3923 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003924 netif_info(qdev, drv, qdev->ndev,
3925 "Enabled magic packet successfully on %s.\n",
3926 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003927
3928 wol |= MB_WOL_MAGIC_PKT;
3929 }
3930
3931 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003932 wol |= MB_WOL_MODE_ON;
3933 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003934 netif_err(qdev, drv, qdev->ndev,
3935 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003936 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003937 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003938 }
3939
3940 return status;
3941}
3942
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003943static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003944{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003945
Ron Mercer6497b602009-02-12 16:37:13 -08003946 /* Don't kill the reset worker thread if we
3947 * are in the process of recovery.
3948 */
3949 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3950 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003951 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3952 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003953 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003954 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003955 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003956}
3957
3958static int ql_adapter_down(struct ql_adapter *qdev)
3959{
3960 int i, status = 0;
3961
3962 ql_link_off(qdev);
3963
3964 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003965
Ron Mercer39aa8162009-08-27 11:02:11 +00003966 for (i = 0; i < qdev->rss_ring_count; i++)
3967 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003968
3969 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3970
3971 ql_disable_interrupts(qdev);
3972
3973 ql_tx_ring_clean(qdev);
3974
Ron Mercer6b318cb2009-03-09 10:59:26 +00003975	/* Call netif_napi_del() from a common point.
3976 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003977 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003978 netif_napi_del(&qdev->rx_ring[i].napi);
3979
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003980 status = ql_adapter_reset(qdev);
3981 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003982 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3983 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003984 ql_free_rx_buffers(qdev);
3985
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003986 return status;
3987}
3988
3989static int ql_adapter_up(struct ql_adapter *qdev)
3990{
3991 int err = 0;
3992
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003993 err = ql_adapter_initialize(qdev);
3994 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003995 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003996 goto err_init;
3997 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003998 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003999 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00004000 /* If the port is initialized and the
4001	 * link is up then turn on the carrier.
4002 */
4003 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4004 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00004005 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00004006 /* Restore rx mode. */
4007 clear_bit(QL_ALLMULTI, &qdev->flags);
4008 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4009 qlge_set_multicast_list(qdev->ndev);
4010
Ron Mercerc1b60092010-10-27 04:58:12 +00004011 /* Restore vlan setting. */
4012 qlge_restore_vlan(qdev);
4013
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004014 ql_enable_interrupts(qdev);
4015 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004016 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004017
4018 return 0;
4019err_init:
4020 ql_adapter_reset(qdev);
4021 return err;
4022}
4023
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004024static void ql_release_adapter_resources(struct ql_adapter *qdev)
4025{
4026 ql_free_mem_resources(qdev);
4027 ql_free_irq(qdev);
4028}
4029
4030static int ql_get_adapter_resources(struct ql_adapter *qdev)
4031{
4032 int status = 0;
4033
4034 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004035 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004036 return -ENOMEM;
4037 }
4038 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004039 return status;
4040}
4041
4042static int qlge_close(struct net_device *ndev)
4043{
4044 struct ql_adapter *qdev = netdev_priv(ndev);
4045
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004046	/* If we hit the pci_channel_io_perm_failure
4047	 * condition, then we have already
4048	 * brought the adapter down.
4049 */
4050 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004051 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004052 clear_bit(QL_EEH_FATAL, &qdev->flags);
4053 return 0;
4054 }
4055
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004056 /*
4057 * Wait for device to recover from a reset.
4058 * (Rarely happens, but possible.)
4059 */
4060 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4061 msleep(1);
4062 ql_adapter_down(qdev);
4063 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004064 return 0;
4065}
4066
4067static int ql_configure_rings(struct ql_adapter *qdev)
4068{
4069 int i;
4070 struct rx_ring *rx_ring;
4071 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004072 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004073 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4074 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4075
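	/* Large buffers are carved from higher-order pages; remember the
	 * page allocation order that matches the chosen buffer length.
	 */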
4076 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004077
Ron Mercera4ab6132009-08-27 11:02:10 +00004078 /* In a perfect world we have one RSS ring for each CPU
4079	 * and each has its own vector. To do that we ask for
4080 * cpu_cnt vectors. ql_enable_msix() will adjust the
4081 * vector count to what we actually get. We then
4082 * allocate an RSS ring for each.
4083 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004084 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004085 qdev->intr_count = cpu_cnt;
4086 ql_enable_msix(qdev);
4087 /* Adjust the RSS ring count to the actual vector count. */
4088 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004089 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004090 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004091
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004092 for (i = 0; i < qdev->tx_ring_count; i++) {
4093 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004094 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004095 tx_ring->qdev = qdev;
4096 tx_ring->wq_id = i;
4097 tx_ring->wq_len = qdev->tx_ring_size;
4098 tx_ring->wq_size =
4099 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4100
4101 /*
4102		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004103 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004104 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004105 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004106 }
4107
4108 for (i = 0; i < qdev->rx_ring_count; i++) {
4109 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004110 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004111 rx_ring->qdev = qdev;
4112 rx_ring->cq_id = i;
4113 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004114 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004115 /*
4116 * Inbound (RSS) queues.
4117 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004118 rx_ring->cq_len = qdev->rx_ring_size;
4119 rx_ring->cq_size =
4120 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4121 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4122 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004123 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004124 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f72010-02-09 11:49:52 +00004125 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4126 "lbq_buf_size %d, order = %d\n",
4127 rx_ring->lbq_buf_size,
4128 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4130 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004131 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004132 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004133 rx_ring->type = RX_Q;
4134 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004135 /*
4136 * Outbound queue handles outbound completions only.
4137 */
4138 /* outbound cq is same size as tx_ring it services. */
4139 rx_ring->cq_len = qdev->tx_ring_size;
4140 rx_ring->cq_size =
4141 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4142 rx_ring->lbq_len = 0;
4143 rx_ring->lbq_size = 0;
4144 rx_ring->lbq_buf_size = 0;
4145 rx_ring->sbq_len = 0;
4146 rx_ring->sbq_size = 0;
4147 rx_ring->sbq_buf_size = 0;
4148 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004149 }
4150 }
4151 return 0;
4152}
4153
4154static int qlge_open(struct net_device *ndev)
4155{
4156 int err = 0;
4157 struct ql_adapter *qdev = netdev_priv(ndev);
4158
Ron Mercer74e12432009-11-11 12:54:04 +00004159 err = ql_adapter_reset(qdev);
4160 if (err)
4161 return err;
4162
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004163 err = ql_configure_rings(qdev);
4164 if (err)
4165 return err;
4166
4167 err = ql_get_adapter_resources(qdev);
4168 if (err)
4169 goto error_up;
4170
4171 err = ql_adapter_up(qdev);
4172 if (err)
4173 goto error_up;
4174
4175 return err;
4176
4177error_up:
4178 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004179 return err;
4180}
4181
Ron Mercer7c734352009-10-19 03:32:19 +00004182static int ql_change_rx_buffers(struct ql_adapter *qdev)
4183{
4184 struct rx_ring *rx_ring;
4185 int i, status;
4186 u32 lbq_buf_len;
4187
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004188 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004189 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4190 int i = 3;
4191 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004192 netif_err(qdev, ifup, qdev->ndev,
4193 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004194 ssleep(1);
4195 }
4196
4197 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004198 netif_err(qdev, ifup, qdev->ndev,
4199 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004200 return -ETIMEDOUT;
4201 }
4202 }
4203
4204 status = ql_adapter_down(qdev);
4205 if (status)
4206 goto error;
4207
4208 /* Get the new rx buffer size. */
4209 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4210 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4211 qdev->lbq_buf_order = get_order(lbq_buf_len);
4212
4213 for (i = 0; i < qdev->rss_ring_count; i++) {
4214 rx_ring = &qdev->rx_ring[i];
4215 /* Set the new size. */
4216 rx_ring->lbq_buf_size = lbq_buf_len;
4217 }
4218
4219 status = ql_adapter_up(qdev);
4220 if (status)
4221 goto error;
4222
4223 return status;
4224error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004225 netif_alert(qdev, ifup, qdev->ndev,
4226 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004227 set_bit(QL_ADAPTER_UP, &qdev->flags);
4228 dev_close(qdev->ndev);
4229 return status;
4230}
4231
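/* ndo_change_mtu handler (reached e.g. via "ip link set <dev> mtu ...").
 * Only the 1500 <-> 9000 transition is supported; anything else returns
 * -EINVAL.  The MPI port configuration work is scheduled and, if the
 * interface is running, the large RX buffers are resized to match.
 */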
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004232static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4233{
4234 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004235 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004236
4237 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004238 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004239 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004240 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004241 } else
4242 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004243
4244 queue_delayed_work(qdev->workqueue,
4245 &qdev->mpi_port_cfg_work, 3*HZ);
4246
Breno Leitao746079d2010-02-04 10:11:19 +00004247 ndev->mtu = new_mtu;
4248
Ron Mercer7c734352009-10-19 03:32:19 +00004249 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004250 return 0;
4251 }
4252
Ron Mercer7c734352009-10-19 03:32:19 +00004253 status = ql_change_rx_buffers(qdev);
4254 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004255 netif_err(qdev, ifup, qdev->ndev,
4256 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004257 }
4258
4259 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004260}
4261
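/* ndo_get_stats handler: fold the per-ring RX and TX counters into the
 * net_device statistics structure.
 */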
4262static struct net_device_stats *qlge_get_stats(struct net_device
4263 *ndev)
4264{
Ron Mercer885ee392009-11-03 13:49:31 +00004265 struct ql_adapter *qdev = netdev_priv(ndev);
4266 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4267 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4268 unsigned long pkts, mcast, dropped, errors, bytes;
4269 int i;
4270
4271 /* Get RX stats. */
4272 pkts = mcast = dropped = errors = bytes = 0;
4273 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4274 pkts += rx_ring->rx_packets;
4275 bytes += rx_ring->rx_bytes;
4276 dropped += rx_ring->rx_dropped;
4277 errors += rx_ring->rx_errors;
4278 mcast += rx_ring->rx_multicast;
4279 }
4280 ndev->stats.rx_packets = pkts;
4281 ndev->stats.rx_bytes = bytes;
4282 ndev->stats.rx_dropped = dropped;
4283 ndev->stats.rx_errors = errors;
4284 ndev->stats.multicast = mcast;
4285
4286 /* Get TX stats. */
4287 pkts = errors = bytes = 0;
4288 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4289 pkts += tx_ring->tx_packets;
4290 bytes += tx_ring->tx_bytes;
4291 errors += tx_ring->tx_errors;
4292 }
4293 ndev->stats.tx_packets = pkts;
4294 ndev->stats.tx_bytes = bytes;
4295 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004296 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004297}
4298
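/* ndo_set_rx_mode handler.  Under the routing-index semaphore, switch
 * promiscuous and all-multicast routing to match the netdev flags, then
 * load the multicast addresses into the MAC CAM and enable the
 * multicast-match routing slot when the list is non-empty.
 */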
stephen hemmingerac409212010-10-21 07:50:54 +00004299static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004300{
Joe Perchesb16fed02010-11-15 11:12:28 +00004301 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004302 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004303 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004304
Ron Mercercc288f52009-02-23 10:42:14 +00004305 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4306 if (status)
4307 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004308 /*
4309 * Set or clear promiscuous mode if a
4310 * transition is taking place.
4311 */
4312 if (ndev->flags & IFF_PROMISC) {
4313 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4314 if (ql_set_routing_reg
4315 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004316 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004317 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004318 } else {
4319 set_bit(QL_PROMISCUOUS, &qdev->flags);
4320 }
4321 }
4322 } else {
4323 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4324 if (ql_set_routing_reg
4325 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004326 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004327 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004328 } else {
4329 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4330 }
4331 }
4332 }
4333
4334 /*
4335 * Set or clear all multicast mode if a
4336 * transition is taking place.
4337 */
4338 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004339 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004340 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4341 if (ql_set_routing_reg
4342 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004343 netif_err(qdev, hw, qdev->ndev,
4344 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004345 } else {
4346 set_bit(QL_ALLMULTI, &qdev->flags);
4347 }
4348 }
4349 } else {
4350 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4351 if (ql_set_routing_reg
4352 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004353 netif_err(qdev, hw, qdev->ndev,
4354 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004355 } else {
4356 clear_bit(QL_ALLMULTI, &qdev->flags);
4357 }
4358 }
4359 }
4360
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004361 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004362 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4363 if (status)
4364 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004365 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004366 netdev_for_each_mc_addr(ha, ndev) {
4367 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004368 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004369 netif_err(qdev, hw, qdev->ndev,
4370				  "Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004371 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004372 goto exit;
4373 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004374 i++;
4375 }
Ron Mercercc288f52009-02-23 10:42:14 +00004376 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004377 if (ql_set_routing_reg
4378 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004379 netif_err(qdev, hw, qdev->ndev,
4380 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004381 } else {
4382 set_bit(QL_ALLMULTI, &qdev->flags);
4383 }
4384 }
4385exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004386 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004387}
4388
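/* ndo_set_mac_address handler: validate the new address, keep a local
 * copy in the adapter structure and program it into the CAM under the
 * MAC-address semaphore.
 */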
4389static int qlge_set_mac_address(struct net_device *ndev, void *p)
4390{
Joe Perchesb16fed02010-11-15 11:12:28 +00004391 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004392 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004393 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004394
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004395 if (!is_valid_ether_addr(addr->sa_data))
4396 return -EADDRNOTAVAIL;
4397 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004398 /* Update local copy of current mac address. */
4399 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004400
Ron Mercercc288f52009-02-23 10:42:14 +00004401 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4402 if (status)
4403 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004404 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4405 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004406 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004407 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004408 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4409 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004410}
4411
4412static void qlge_tx_timeout(struct net_device *ndev)
4413{
Joe Perchesb16fed02010-11-15 11:12:28 +00004414 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004415 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004416}
4417
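/* Deferred ASIC recovery work, scheduled when the driver queues an ASIC
 * error (e.g. from the TX timeout handler above).  Under the rtnl lock
 * the adapter is cycled down and back up and the RX mode is restored;
 * if the cycle fails the device is closed.
 */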
4418static void ql_asic_reset_work(struct work_struct *work)
4419{
4420 struct ql_adapter *qdev =
4421 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004422 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004423 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004424 status = ql_adapter_down(qdev);
4425 if (status)
4426 goto error;
4427
4428 status = ql_adapter_up(qdev);
4429 if (status)
4430 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004431
4432 /* Restore rx mode. */
4433 clear_bit(QL_ALLMULTI, &qdev->flags);
4434 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4435 qlge_set_multicast_list(qdev->ndev);
4436
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004437 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004438 return;
4439error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004440 netif_alert(qdev, ifup, qdev->ndev,
4441 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004442
Ron Mercerdb988122009-03-09 10:59:17 +00004443 set_bit(QL_ADAPTER_UP, &qdev->flags);
4444 dev_close(qdev->ndev);
4445 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004446}
4447
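/* Chip-specific flash-read and port-initialization hooks.  The set
 * matching the PCI device ID (8012 vs. 8000) is selected in
 * ql_get_board_info().
 */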
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004448static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004449 .get_flash = ql_get_8012_flash_params,
4450 .port_initialize = ql_8012_port_initialize,
4451};
4452
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004453static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004454 .get_flash = ql_get_8000_flash_params,
4455 .port_initialize = ql_8000_port_initialize,
4456};
4457
Ron Mercere4552f52009-06-09 05:39:32 +00004458/* Find the pcie function number for the other NIC
4459 * on this chip. Since both NIC functions share a
4460 * common firmware we have the lowest enabled function
4461 * do any common work. Examples would be resetting
4462 * after a fatal firmware error, or doing a firmware
4463 * coredump.
4464 */
4465static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004466{
Ron Mercere4552f52009-06-09 05:39:32 +00004467 int status = 0;
4468 u32 temp;
4469 u32 nic_func1, nic_func2;
4470
4471 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4472 &temp);
4473 if (status)
4474 return status;
4475
4476 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4477 MPI_TEST_NIC_FUNC_MASK);
4478 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4479 MPI_TEST_NIC_FUNC_MASK);
4480
4481 if (qdev->func == nic_func1)
4482 qdev->alt_func = nic_func2;
4483 else if (qdev->func == nic_func2)
4484 qdev->alt_func = nic_func1;
4485 else
4486 status = -EIO;
4487
4488 return status;
4489}
4490
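/* Work out which PCI function this instance is, find the alternate
 * function, derive the port number plus the per-port semaphore, link
 * status and mailbox settings, and select the chip-specific nic_ops
 * based on the PCI device ID.
 */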
4491static int ql_get_board_info(struct ql_adapter *qdev)
4492{
4493 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004494 qdev->func =
4495 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004496 if (qdev->func > 3)
4497 return -EIO;
4498
4499 status = ql_get_alt_pcie_func(qdev);
4500 if (status)
4501 return status;
4502
4503 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4504 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004505 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4506 qdev->port_link_up = STS_PL1;
4507 qdev->port_init = STS_PI1;
4508 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4509 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4510 } else {
4511 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4512 qdev->port_link_up = STS_PL0;
4513 qdev->port_init = STS_PI0;
4514 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4515 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4516 }
4517 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004518 qdev->device_id = qdev->pdev->device;
4519 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4520 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004521 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4522 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004523 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004524}
4525
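/* Undo ql_init_device(): destroy the workqueue, unmap the register and
 * doorbell BARs, free any MPI coredump buffer and release the PCI
 * regions.
 */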
4526static void ql_release_all(struct pci_dev *pdev)
4527{
4528 struct net_device *ndev = pci_get_drvdata(pdev);
4529 struct ql_adapter *qdev = netdev_priv(ndev);
4530
4531 if (qdev->workqueue) {
4532 destroy_workqueue(qdev->workqueue);
4533 qdev->workqueue = NULL;
4534 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004535
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004536 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004537 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004538 if (qdev->doorbell_area)
4539 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004540 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004541 pci_release_regions(pdev);
4542 pci_set_drvdata(pdev, NULL);
4543}
4544
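/* One-time PCI and adapter setup done at probe time: enable the device,
 * map the register and doorbell BARs, set up DMA masking, read the
 * board info and flash contents, and initialize locks, deferred work
 * items and the default ring and interrupt-coalescing parameters.
 */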
4545static int __devinit ql_init_device(struct pci_dev *pdev,
4546 struct net_device *ndev, int cards_found)
4547{
4548 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004549 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004550
Ron Mercere3324712009-07-02 06:06:13 +00004551 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004552 err = pci_enable_device(pdev);
4553 if (err) {
4554 dev_err(&pdev->dev, "PCI device enable failed.\n");
4555 return err;
4556 }
4557
Ron Mercerebd6e772009-09-29 08:39:25 +00004558 qdev->ndev = ndev;
4559 qdev->pdev = pdev;
4560 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004561
Ron Mercerbc9167f2009-10-10 09:35:04 +00004562 /* Set PCIe read request size */
4563 err = pcie_set_readrq(pdev, 4096);
4564 if (err) {
4565 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004566 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004567 }
4568
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004569 err = pci_request_regions(pdev, DRV_NAME);
4570 if (err) {
4571 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004572		goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004573 }
4574
4575 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004576 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004577 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004578 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004579 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004580 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004581 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004582 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004583 }
4584
4585 if (err) {
4586 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004587 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004588 }
4589
Ron Mercer73475332009-11-06 07:44:58 +00004590 /* Set PCIe reset type for EEH to fundamental. */
4591 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004592 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004593 qdev->reg_base =
4594 ioremap_nocache(pci_resource_start(pdev, 1),
4595 pci_resource_len(pdev, 1));
4596 if (!qdev->reg_base) {
4597 dev_err(&pdev->dev, "Register mapping failed.\n");
4598 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004599 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004600 }
4601
4602 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4603 qdev->doorbell_area =
4604 ioremap_nocache(pci_resource_start(pdev, 3),
4605 pci_resource_len(pdev, 3));
4606 if (!qdev->doorbell_area) {
4607 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4608 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004609 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004610 }
4611
Ron Mercere4552f52009-06-09 05:39:32 +00004612 err = ql_get_board_info(qdev);
4613 if (err) {
4614 dev_err(&pdev->dev, "Register access failed.\n");
4615 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004616 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004617 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004618 qdev->msg_enable = netif_msg_init(debug, default_msg);
4619 spin_lock_init(&qdev->hw_lock);
4620 spin_lock_init(&qdev->stats_lock);
4621
Ron Mercer8aae2602010-01-15 13:31:28 +00004622 if (qlge_mpi_coredump) {
4623 qdev->mpi_coredump =
4624 vmalloc(sizeof(struct ql_mpi_coredump));
4625 if (qdev->mpi_coredump == NULL) {
4626 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4627 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004628 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004629 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004630 if (qlge_force_coredump)
4631 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004632 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004633 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004634 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004635 if (err) {
4636 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004637 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004638 }
4639
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004640 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004641 /* Keep local copy of current mac address. */
4642 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004643
4644 /* Set up the default ring sizes. */
4645 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4646 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4647
4648 /* Set up the coalescing parameters. */
4649 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4650 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4651 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4652 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4653
4654 /*
4655 * Set up the operating parameters.
4656 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004657 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4658 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4659 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4660 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004661 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004662 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004663 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004664 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004665 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004666
4667 if (!cards_found) {
4668 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4669 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4670 DRV_NAME, DRV_VERSION);
4671 }
4672 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004673err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004674 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004675err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004676 pci_disable_device(pdev);
4677 return err;
4678}
4679
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004680static const struct net_device_ops qlge_netdev_ops = {
4681 .ndo_open = qlge_open,
4682 .ndo_stop = qlge_close,
4683 .ndo_start_xmit = qlge_send,
4684 .ndo_change_mtu = qlge_change_mtu,
4685 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004686 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004687 .ndo_set_mac_address = qlge_set_mac_address,
4688 .ndo_validate_addr = eth_validate_addr,
4689 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004690 .ndo_fix_features = qlge_fix_features,
4691 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004692 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4693 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004694};
4695
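/* Periodic (5 second) timer used to notice a dead PCI bus for EEH: it
 * reads a chip register and, while the channel is still online, rearms
 * itself.  Once the channel is reported offline it logs the status and
 * stops rearming.
 */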
Ron Mercer15c052f2010-02-04 13:32:46 -08004696static void ql_timer(unsigned long data)
4697{
4698 struct ql_adapter *qdev = (struct ql_adapter *)data;
4699 u32 var = 0;
4700
4701 var = ql_read32(qdev, STS);
4702 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004703 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004704 return;
4705 }
4706
Breno Leitao72046d82010-07-01 03:00:17 +00004707 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004708}
4709
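/* PCI probe entry point: allocate a multi-queue net_device, run the
 * common ql_init_device() setup, advertise the offload features,
 * register the netdev and start the bus-health (EEH) timer.
 */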
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004710static int __devinit qlge_probe(struct pci_dev *pdev,
4711 const struct pci_device_id *pci_entry)
4712{
4713 struct net_device *ndev = NULL;
4714 struct ql_adapter *qdev = NULL;
4715 static int cards_found = 0;
4716 int err = 0;
4717
Ron Mercer1e213302009-03-09 10:59:21 +00004718 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4719 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004720 if (!ndev)
4721 return -ENOMEM;
4722
4723 err = ql_init_device(pdev, ndev, cards_found);
4724 if (err < 0) {
4725 free_netdev(ndev);
4726 return err;
4727 }
4728
4729 qdev = netdev_priv(ndev);
4730 SET_NETDEV_DEV(ndev, &pdev->dev);
Michał Mirosław88230fd2011-04-18 13:31:21 +00004731 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4732 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4733 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4734 ndev->features = ndev->hw_features |
4735 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004736
4737 if (test_bit(QL_DMA64, &qdev->flags))
4738 ndev->features |= NETIF_F_HIGHDMA;
4739
4740 /*
4741 * Set up net_device structure.
4742 */
4743 ndev->tx_queue_len = qdev->tx_ring_size;
4744 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004745
4746 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004747 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004748 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004749
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004750 err = register_netdev(ndev);
4751 if (err) {
4752 dev_err(&pdev->dev, "net device registration failed.\n");
4753 ql_release_all(pdev);
4754 pci_disable_device(pdev);
4755 return err;
4756 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004757 /* Start up the timer to trigger EEH if
4758	 * the bus goes dead.
4759 */
4760 init_timer_deferrable(&qdev->timer);
4761 qdev->timer.data = (unsigned long)qdev;
4762 qdev->timer.function = ql_timer;
4763 qdev->timer.expires = jiffies + (5*HZ);
4764 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004765 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004766 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004767 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004768 cards_found++;
4769 return 0;
4770}
4771
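/* Thin, non-static wrappers around the normal transmit and RX-completion
 * paths, exported for use by the loopback self-test code.
 */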
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004772netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4773{
4774 return qlge_send(skb, ndev);
4775}
4776
4777int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4778{
4779 return ql_clean_inbound_rx_ring(rx_ring, budget);
4780}
4781
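/* PCI remove: stop the bus-health timer and all deferred work,
 * unregister the netdev and release everything set up by
 * ql_init_device().
 */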
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004782static void __devexit qlge_remove(struct pci_dev *pdev)
4783{
4784 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004785 struct ql_adapter *qdev = netdev_priv(ndev);
4786 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004787 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004788 unregister_netdev(ndev);
4789 ql_release_all(pdev);
4790 pci_disable_device(pdev);
4791 free_netdev(ndev);
4792}
4793
Ron Mercer6d190c62009-10-28 08:39:20 +00004794/* Clean up resources without touching hardware. */
4795static void ql_eeh_close(struct net_device *ndev)
4796{
4797 int i;
4798 struct ql_adapter *qdev = netdev_priv(ndev);
4799
4800 if (netif_carrier_ok(ndev)) {
4801 netif_carrier_off(ndev);
4802 netif_stop_queue(ndev);
4803 }
4804
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004805 /* Disabling the timer */
4806 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004807 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004808
4809 for (i = 0; i < qdev->rss_ring_count; i++)
4810 netif_napi_del(&qdev->rx_ring[i].napi);
4811
4812 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4813 ql_tx_ring_clean(qdev);
4814 ql_free_rx_buffers(qdev);
4815 ql_release_adapter_resources(qdev);
4816}
4817
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004818/*
4819 * This callback is called by the PCI subsystem whenever
4820 * a PCI bus error is detected.
4821 */
4822static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4823 enum pci_channel_state state)
4824{
4825 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004826 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004827
Ron Mercer6d190c62009-10-28 08:39:20 +00004828 switch (state) {
4829 case pci_channel_io_normal:
4830 return PCI_ERS_RESULT_CAN_RECOVER;
4831 case pci_channel_io_frozen:
4832 netif_device_detach(ndev);
4833 if (netif_running(ndev))
4834 ql_eeh_close(ndev);
4835 pci_disable_device(pdev);
4836 return PCI_ERS_RESULT_NEED_RESET;
4837 case pci_channel_io_perm_failure:
4838 dev_err(&pdev->dev,
4839 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004840 ql_eeh_close(ndev);
4841 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004842 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004843 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004844
4845 /* Request a slot reset. */
4846 return PCI_ERS_RESULT_NEED_RESET;
4847}
4848
4849/*
4850 * This callback is called after the PCI bus has been reset.
4851 * Basically, this tries to restart the card from scratch.
4852 * This is a shortened version of the device probe/discovery code,
4853 * it resembles the first half of the qlge_probe() routine.
4854 */
4855static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4856{
4857 struct net_device *ndev = pci_get_drvdata(pdev);
4858 struct ql_adapter *qdev = netdev_priv(ndev);
4859
Ron Mercer6d190c62009-10-28 08:39:20 +00004860 pdev->error_state = pci_channel_io_normal;
4861
4862 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004863 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004864 netif_err(qdev, ifup, qdev->ndev,
4865 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004866 return PCI_ERS_RESULT_DISCONNECT;
4867 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004868 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004869
4870 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004871 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004872 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004873 return PCI_ERS_RESULT_DISCONNECT;
4874 }
4875
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004876 return PCI_ERS_RESULT_RECOVERED;
4877}
4878
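/* Final stage of EEH recovery: reopen the interface if it was running
 * before the error, rearm the bus-health timer and re-attach the
 * device.
 */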
4879static void qlge_io_resume(struct pci_dev *pdev)
4880{
4881 struct net_device *ndev = pci_get_drvdata(pdev);
4882 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004883 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004884
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004885 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004886 err = qlge_open(ndev);
4887 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004888 netif_err(qdev, ifup, qdev->ndev,
4889 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004890 return;
4891 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004892 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004893 netif_err(qdev, ifup, qdev->ndev,
4894 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004895 }
Breno Leitao72046d82010-07-01 03:00:17 +00004896 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004897 netif_device_attach(ndev);
4898}
4899
4900static struct pci_error_handlers qlge_err_handler = {
4901 .error_detected = qlge_io_error_detected,
4902 .slot_reset = qlge_io_slot_reset,
4903 .resume = qlge_io_resume,
4904};
4905
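/* Power-management suspend, also used by qlge_shutdown(): detach the
 * netdev, stop the bus-health timer, bring the adapter down if it was
 * running, arm wake-on-LAN and put the PCI device into the requested
 * low-power state.
 */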
4906static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4907{
4908 struct net_device *ndev = pci_get_drvdata(pdev);
4909 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004910 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004911
4912 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004913 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004914
4915 if (netif_running(ndev)) {
4916 err = ql_adapter_down(qdev);
4917		if (err)
4918 return err;
4919 }
4920
Ron Mercerbc083ce2009-10-21 11:07:40 +00004921 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004922 err = pci_save_state(pdev);
4923 if (err)
4924 return err;
4925
4926 pci_disable_device(pdev);
4927
4928 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4929
4930 return 0;
4931}
4932
David S. Miller04da2cf2008-09-19 16:14:24 -07004933#ifdef CONFIG_PM
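/* Power-management resume: restore PCI state, re-enable the device,
 * clear the wake-on-LAN enables, bring the adapter back up if the
 * interface was running and rearm the bus-health timer.
 */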
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004934static int qlge_resume(struct pci_dev *pdev)
4935{
4936 struct net_device *ndev = pci_get_drvdata(pdev);
4937 struct ql_adapter *qdev = netdev_priv(ndev);
4938 int err;
4939
4940 pci_set_power_state(pdev, PCI_D0);
4941 pci_restore_state(pdev);
4942 err = pci_enable_device(pdev);
4943 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004944 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004945 return err;
4946 }
4947 pci_set_master(pdev);
4948
4949 pci_enable_wake(pdev, PCI_D3hot, 0);
4950 pci_enable_wake(pdev, PCI_D3cold, 0);
4951
4952 if (netif_running(ndev)) {
4953 err = ql_adapter_up(qdev);
4954 if (err)
4955 return err;
4956 }
4957
Breno Leitao72046d82010-07-01 03:00:17 +00004958 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004959 netif_device_attach(ndev);
4960
4961 return 0;
4962}
David S. Miller04da2cf2008-09-19 16:14:24 -07004963#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004964
4965static void qlge_shutdown(struct pci_dev *pdev)
4966{
4967 qlge_suspend(pdev, PMSG_SUSPEND);
4968}
4969
4970static struct pci_driver qlge_driver = {
4971 .name = DRV_NAME,
4972 .id_table = qlge_pci_tbl,
4973 .probe = qlge_probe,
4974 .remove = __devexit_p(qlge_remove),
4975#ifdef CONFIG_PM
4976 .suspend = qlge_suspend,
4977 .resume = qlge_resume,
4978#endif
4979 .shutdown = qlge_shutdown,
4980 .err_handler = &qlge_err_handler
4981};
4982
4983static int __init qlge_init_module(void)
4984{
4985 return pci_register_driver(&qlge_driver);
4986}
4987
4988static void __exit qlge_exit(void)
4989{
4990 pci_unregister_driver(&qlge_driver);
4991}
4992
4993module_init(qlge_init_module);
4994module_exit(qlge_exit);