/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

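/* Spin on the hardware semaphore until it is acquired or roughly 3 ms
 * (30 tries x 100 us) have elapsed, then give up with -ETIMEDOUT.
 */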
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

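/* Enable/disable the chip's global interrupt bit.  The upper 16 bits of
 * INTR_EN act as a write mask for the bit being changed, so writing only
 * the mask clears the enable.
 */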
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

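/* Verify that the flash image begins with the expected signature string and
 * that its 16-bit words sum to zero (i.e. the checksum is valid).
 */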
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

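/* Read one 32-bit word of flash at the given dword offset using the
 * FLASH_ADDR/FLASH_DATA register pair.
 */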
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

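/* Read and validate the 8000-series flash parameter block, then copy the
 * (possibly BOFM-modified) MAC address into the net_device.
 */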
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

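/* Read and validate the 8012-series flash parameter block and copy its
 * MAC address into the net_device.
 */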
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

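/* 8000-series port bring-up: query MPI firmware version and state, then
 * defer TX/RX frame-size configuration to the mpi_port_cfg worker.
 */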
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

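/* Advance to the next large-buffer descriptor and sync its page chunk for
 * CPU access; unmap the master page once its last chunk has been consumed.
 */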
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

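/* Hand out the next lbq_buf_size chunk of the rx_ring's master page,
 * allocating and DMA-mapping a fresh page of order lbq_buf_order when the
 * previous one has been fully carved up.
 */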
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

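/* Refill both the small and large receive buffer queues for this rx_ring. */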
1291static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 struct rx_ring *rx_ring)
1293{
1294 ql_update_sbq(qdev, rx_ring);
1295 ql_update_lbq(qdev, rx_ring);
1296}
1297
1298/* Unmaps tx buffers. Can be called from send() if a pci mapping
1299 * fails at some stage, or from the interrupt when a tx completes.
1300 */
1301static void ql_unmap_send(struct ql_adapter *qdev,
1302 struct tx_ring_desc *tx_ring_desc, int mapped)
1303{
1304 int i;
1305 for (i = 0; i < mapped; i++) {
1306 if (i == 0 || (i == 7 && mapped > 7)) {
1307 /*
1308 * Unmap the skb->data area, or the
1309 * external sglist (AKA the Outbound
1310 * Address List (OAL)).
1311 * If its the zeroeth element, then it's
1312 * the skb->data area. If it's the 7th
1313 * element and there is more than 6 frags,
1314 * then its an OAL.
1315 */
1316 if (i == 7) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001317 netif_printk(qdev, tx_done, KERN_DEBUG,
1318 qdev->ndev,
1319 "unmapping OAL area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001320 }
1321 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001322 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001323 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001324 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001325 maplen),
1326 PCI_DMA_TODEVICE);
1327 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00001328 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 "unmapping frag %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001330 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001331 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001332 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001333 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001334 maplen), PCI_DMA_TODEVICE);
1335 }
1336 }
1337
1338}
1339
1340/* Map the buffers for this transmit. This will return
1341 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342 */
1343static int ql_map_send(struct ql_adapter *qdev,
1344 struct ob_mac_iocb_req *mac_iocb_ptr,
1345 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346{
1347 int len = skb_headlen(skb);
1348 dma_addr_t map;
1349 int frag_idx, err, map_idx = 0;
1350 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 int frag_cnt = skb_shinfo(skb)->nr_frags;
1352
1353 if (frag_cnt) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001354 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 "frag_cnt = %d.\n", frag_cnt);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001356 }
1357 /*
1358 * Map the skb buffer first.
1359 */
1360 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361
1362 err = pci_dma_mapping_error(qdev->pdev, map);
1363 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001364 netif_err(qdev, tx_queued, qdev->ndev,
1365 "PCI mapping failed with error: %d\n", err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001366
1367 return NETDEV_TX_BUSY;
1368 }
1369
1370 tbd->len = cpu_to_le32(len);
1371 tbd->addr = cpu_to_le64(map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001372 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001374 map_idx++;
1375
1376 /*
1377 * This loop fills the remainder of the 8 address descriptors
1378 * in the IOCB. If there are more than 7 fragments, then the
1379 * eighth address desc will point to an external list (OAL).
1380 * When this happens, the remainder of the frags will be stored
1381 * in this list.
1382 */
1383 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385 tbd++;
1386 if (frag_idx == 6 && frag_cnt > 7) {
1387 /* Let's tack on an sglist.
1388 * Our control block will now
1389 * look like this:
1390 * iocb->seg[0] = skb->data
1391 * iocb->seg[1] = frag[0]
1392 * iocb->seg[2] = frag[1]
1393 * iocb->seg[3] = frag[2]
1394 * iocb->seg[4] = frag[3]
1395 * iocb->seg[5] = frag[4]
1396 * iocb->seg[6] = frag[5]
1397 * iocb->seg[7] = ptr to OAL (external sglist)
1398 * oal->seg[0] = frag[6]
1399 * oal->seg[1] = frag[7]
1400 * oal->seg[2] = frag[8]
1401 * oal->seg[3] = frag[9]
1402 * oal->seg[4] = frag[10]
1403 * etc...
1404 */
1405 /* Tack on the OAL in the eighth segment of IOCB. */
1406 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407 sizeof(struct oal),
1408 PCI_DMA_TODEVICE);
1409 err = pci_dma_mapping_error(qdev->pdev, map);
1410 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001411 netif_err(qdev, tx_queued, qdev->ndev,
1412 "PCI mapping outbound address list with error: %d\n",
1413 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001414 goto map_error;
1415 }
1416
1417 tbd->addr = cpu_to_le64(map);
1418 /*
1419 * The length is the number of fragments
1420 * that remain to be mapped times the length
1421 * of our sglist (OAL).
1422 */
1423 tbd->len =
1424 cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 (frag_cnt - frag_idx)) | TX_DESC_C);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001426 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001427 map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001428 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001429 sizeof(struct oal));
1430 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431 map_idx++;
1432 }
1433
Eric Dumazet9e903e02011-10-18 21:00:24 +00001434 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
Ian Campbell5d6bcdf2011-10-06 11:10:48 +01001435 DMA_TO_DEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001436
Ian Campbell5d6bcdf2011-10-06 11:10:48 +01001437 err = dma_mapping_error(&qdev->pdev->dev, map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001438 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1441 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001442 goto map_error;
1443 }
1444
1445 tbd->addr = cpu_to_le64(map);
Eric Dumazet9e903e02011-10-18 21:00:24 +00001446 tbd->len = cpu_to_le32(skb_frag_size(frag));
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Eric Dumazet9e903e02011-10-18 21:00:24 +00001449 skb_frag_size(frag));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001450
1451 }
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1457
1458map_error:
1459 /*
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be umapped.
1464 */
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1467}
1468
Ron Mercer4f848c02010-01-02 10:37:43 +00001469/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001470static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 u32 length,
1474 u16 vlan_id)
1475{
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001478 struct napi_struct *napi = &rx_ring->napi;
1479
1480 napi->dev = qdev->ndev;
1481
1482 skb = napi_get_frags(napi);
1483 if (!skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001484 netif_err(qdev, drv, qdev->ndev,
1485 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001486 rx_ring->rx_dropped++;
1487 put_page(lbq_desc->p.pg_chunk.page);
1488 return;
1489 }
1490 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001491 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 lbq_desc->p.pg_chunk.page,
1493 lbq_desc->p.pg_chunk.offset,
1494 length);
Ron Mercer63526712010-01-02 10:37:44 +00001495
1496 skb->len += length;
1497 skb->data_len += length;
1498 skb->truesize += length;
1499 skb_shinfo(skb)->nr_frags++;
1500
1501 rx_ring->rx_packets++;
1502 rx_ring->rx_bytes += length;
1503 skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001505 if (vlan_id != 0xffff)
1506 __vlan_hwaccel_put_tag(skb, vlan_id);
1507 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001508}
1509
1510/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001511static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 struct rx_ring *rx_ring,
1513 struct ib_mac_iocb_rsp *ib_mac_rsp,
1514 u32 length,
1515 u16 vlan_id)
1516{
1517 struct net_device *ndev = qdev->ndev;
1518 struct sk_buff *skb = NULL;
1519 void *addr;
1520 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 struct napi_struct *napi = &rx_ring->napi;
1522
1523 skb = netdev_alloc_skb(ndev, length);
1524 if (!skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001525 netif_err(qdev, drv, qdev->ndev,
1526 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001527 rx_ring->rx_dropped++;
1528 put_page(lbq_desc->p.pg_chunk.page);
1529 return;
1530 }
1531
1532 addr = lbq_desc->p.pg_chunk.va;
1533 prefetch(addr);
1534
1535
1536 /* Frame error, so drop the packet. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001538 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00001539 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001540 rx_ring->rx_errors++;
1541 goto err_out;
1542 }
1543
1544 /* The max framesize filter on this chip is set higher than
1545 * MTU since FCoE uses 2k frames.
1546 */
1547 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001548 netif_err(qdev, drv, qdev->ndev,
1549 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001550 rx_ring->rx_dropped++;
1551 goto err_out;
1552 }
1553 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f2010-02-09 11:49:52 +00001554 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001557 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559 length-ETH_HLEN);
1560 skb->len += length-ETH_HLEN;
1561 skb->data_len += length-ETH_HLEN;
1562 skb->truesize += length-ETH_HLEN;
1563
1564 rx_ring->rx_packets++;
1565 rx_ring->rx_bytes += skb->len;
1566 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001567 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001568
Michał Mirosław88230fd2011-04-18 13:31:21 +00001569 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001570 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571 /* TCP frame. */
1572 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001573 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001575 skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 /* Unfragmented ipv4 UDP frame. */
1579 struct iphdr *iph = (struct iphdr *) skb->data;
1580 if (!(iph->frag_off &
1581 cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00001583 netif_printk(qdev, rx_status, KERN_DEBUG,
1584 qdev->ndev,
1585 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001586 }
1587 }
1588 }
1589
1590 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001591 if (vlan_id != 0xffff)
1592 __vlan_hwaccel_put_tag(skb, vlan_id);
1593 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 napi_gro_receive(napi, skb);
1595 else
1596 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001597 return;
1598err_out:
1599 dev_kfree_skb_any(skb);
1600 put_page(lbq_desc->p.pg_chunk.page);
1601}
1602
1603/* Process an inbound completion from an rx ring. */
1604static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 struct rx_ring *rx_ring,
1606 struct ib_mac_iocb_rsp *ib_mac_rsp,
1607 u32 length,
1608 u16 vlan_id)
1609{
1610 struct net_device *ndev = qdev->ndev;
1611 struct sk_buff *skb = NULL;
1612 struct sk_buff *new_skb = NULL;
1613 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614
1615 skb = sbq_desc->p.skb;
1616 /* Allocate new_skb and copy */
1617 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 if (new_skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001619 netif_err(qdev, probe, qdev->ndev,
1620 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001621 rx_ring->rx_dropped++;
1622 return;
1623 }
1624 skb_reserve(new_skb, NET_IP_ALIGN);
1625 memcpy(skb_put(new_skb, length), skb->data, length);
1626 skb = new_skb;
1627
1628 /* Frame error, so drop the packet. */
1629 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001630 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00001631 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001632 dev_kfree_skb_any(skb);
1633 rx_ring->rx_errors++;
1634 return;
1635 }
1636
1637 /* loopback self test for ethtool */
1638 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639 ql_check_lb_frame(qdev, skb);
1640 dev_kfree_skb_any(skb);
1641 return;
1642 }
1643
1644 /* The max framesize filter on this chip is set higher than
1645 * MTU since FCoE uses 2k frames.
1646 */
1647 if (skb->len > ndev->mtu + ETH_HLEN) {
1648 dev_kfree_skb_any(skb);
1649 rx_ring->rx_dropped++;
1650 return;
1651 }
1652
1653 prefetch(skb->data);
1654 skb->dev = ndev;
1655 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001656 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657 "%s Multicast.\n",
1658 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001664 }
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f2010-02-09 11:49:52 +00001666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001668
1669 rx_ring->rx_packets++;
1670 rx_ring->rx_bytes += skb->len;
1671 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001672 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001673
1674 /* If rx checksum is on, and there are no
1675 * csum or frame errors.
1676 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001677 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001678 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679 /* TCP frame. */
1680 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001681 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001683 skb->ip_summed = CHECKSUM_UNNECESSARY;
1684 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686 /* Unfragmented ipv4 UDP frame. */
1687 struct iphdr *iph = (struct iphdr *) skb->data;
1688 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001689 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001690 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00001691 netif_printk(qdev, rx_status, KERN_DEBUG,
1692 qdev->ndev,
1693 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001694 }
1695 }
1696 }
1697
1698 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001699 if (vlan_id != 0xffff)
1700 __vlan_hwaccel_put_tag(skb, vlan_id);
1701 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 napi_gro_receive(&rx_ring->napi, skb);
1703 else
1704 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001705}
1706
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001707static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001708{
1709 void *temp_addr = skb->data;
1710
1711 /* Undo the skb_reserve(skb,32) we did before
1712 * giving to hardware, and realign data on
1713 * a 2-byte boundary.
1714 */
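	/* Net effect, assuming the 32-byte pad mentioned above is QLGE_SB_PAD
	 * and NET_IP_ALIGN has its usual value of 2: data slides back by 30
	 * bytes, so the 14-byte Ethernet header starts at offset 2 and the IP
	 * header behind it lands on a 4-byte boundary.
	 */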
1715 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717 skb_copy_to_linear_data(skb, temp_addr,
1718 (unsigned int)len);
1719}
1720
1721/*
1722 * This function builds an skb for the given inbound
1723 * completion. It will be rewritten for readability in the near
1724	 * future, but for now it works well.
1725 */
1726static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727 struct rx_ring *rx_ring,
1728 struct ib_mac_iocb_rsp *ib_mac_rsp)
1729{
1730 struct bq_desc *lbq_desc;
1731 struct bq_desc *sbq_desc;
1732 struct sk_buff *skb = NULL;
1733 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736 /*
1737 * Handle the header buffer if present.
1738 */
1739 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001741 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001743 /*
1744 * Headers fit nicely into a small buffer.
1745 */
1746 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001748 dma_unmap_addr(sbq_desc, mapaddr),
1749 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001750 PCI_DMA_FROMDEVICE);
1751 skb = sbq_desc->p.skb;
1752 ql_realign_skb(skb, hdr_len);
1753 skb_put(skb, hdr_len);
1754 sbq_desc->p.skb = NULL;
1755 }
1756
1757 /*
1758 * Handle the data buffer(s).
1759 */
1760 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f2010-02-09 11:49:52 +00001761 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001763 return skb;
1764 }
1765
1766 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "Headers in small, data of %d bytes in small, combine them.\n",
1770 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001771 /*
1772 * Data is less than small buffer size so it's
1773 * stuffed in a small buffer.
1774 * For this case we append the data
1775 * from the "data" small buffer to the "header" small
1776 * buffer.
1777 */
1778 sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001780 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001781 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001782 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001783 (sbq_desc, maplen),
1784 PCI_DMA_FROMDEVICE);
1785 memcpy(skb_put(skb, length),
1786 sbq_desc->p.skb->data, length);
1787 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001788 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001789 (sbq_desc,
1790 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001791 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001792 (sbq_desc,
1793 maplen),
1794 PCI_DMA_FROMDEVICE);
1795 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00001796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "%d bytes in a single small buffer.\n",
1798 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001799 sbq_desc = ql_get_curr_sbuf(rx_ring);
1800 skb = sbq_desc->p.skb;
1801 ql_realign_skb(skb, length);
1802 skb_put(skb, length);
1803 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001804 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001806 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001807 maplen),
1808 PCI_DMA_FROMDEVICE);
1809 sbq_desc->p.skb = NULL;
1810 }
1811 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001813 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 "Header in small, %d bytes in large. Chain large to small!\n",
1815 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 /*
1817 * The data is in a single large buffer. We
1818 * chain it to the header buffer's skb and let
1819 * it rip.
1820 */
Ron Mercer7c734352009-10-19 03:32:19 +00001821 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f2010-02-09 11:49:52 +00001822 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823 "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001825 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826 lbq_desc->p.pg_chunk.offset,
1827 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001828 skb->len += length;
1829 skb->data_len += length;
1830 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001831 } else {
1832 /*
1833 * The headers and data are in a single large buffer. We
1834 * copy it to a new skb and let it go. This can happen with
1835 * jumbo mtu on a non-TCP/UDP frame.
1836 */
Ron Mercer7c734352009-10-19 03:32:19 +00001837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001838 skb = netdev_alloc_skb(qdev->ndev, length);
1839 if (skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001840 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001842 return NULL;
1843 }
Ron Mercer4055c7d2009-01-04 17:07:09 -08001844 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001845 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d2009-01-04 17:07:09 -08001846 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001847 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d2009-01-04 17:07:09 -08001848 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001849 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f2010-02-09 11:49:52 +00001850 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001853 skb_fill_page_desc(skb, 0,
1854 lbq_desc->p.pg_chunk.page,
1855 lbq_desc->p.pg_chunk.offset,
1856 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001857 skb->len += length;
1858 skb->data_len += length;
1859 skb->truesize += length;
1860 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001861 __pskb_pull_tail(skb,
1862 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863 VLAN_ETH_HLEN : ETH_HLEN);
1864 }
1865 } else {
1866 /*
1867 * The data is in a chain of large buffers
1868 * pointed to by a small buffer. We loop
1869	 * through and chain them to our small header
1870 * buffer's skb.
1871 * frags: There are 18 max frags and our small
1872 * buffer will hold 32 of them. The thing is,
1873 * we'll use 3 max for our 9000 byte jumbo
1874 * frames. If the MTU goes up we could
1875 * eventually be in trouble.
1876 */
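		/* Worked example, assuming 4KB large-buffer chunks (a typical
		 * lbq_buf_size with a jumbo MTU): a 9000-byte frame needs
		 * DIV_ROUND_UP(9000, 4096) = 3 chunks, well under the 18-frag
		 * limit noted above.
		 */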
Ron Mercer7c734352009-10-19 03:32:19 +00001877 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001878 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001880 dma_unmap_addr(sbq_desc, mapaddr),
1881 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001882 PCI_DMA_FROMDEVICE);
1883 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884 /*
1885	 * This is a non-TCP/UDP IP frame, so
1886 * the headers aren't split into a small
1887 * buffer. We have to use the small buffer
1888 * that contains our sg list as our skb to
1889 * send upstairs. Copy the sg list here to
1890 * a local buffer and use it to find the
1891 * pages to chain.
1892 */
Joe Perchesae9540f2010-02-09 11:49:52 +00001893 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894 "%d bytes of headers & data in chain of large.\n",
1895 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001897 sbq_desc->p.skb = NULL;
1898 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001899 }
1900 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001901 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902 size = (length < rx_ring->lbq_buf_size) ? length :
1903 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001904
Joe Perchesae9540f2010-02-09 11:49:52 +00001905 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 "Adding page %d to skb for %d bytes.\n",
1907 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001908 skb_fill_page_desc(skb, i,
1909 lbq_desc->p.pg_chunk.page,
1910 lbq_desc->p.pg_chunk.offset,
1911 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001912 skb->len += size;
1913 skb->data_len += size;
1914 skb->truesize += size;
1915 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001916 i++;
1917 }
1918 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919 VLAN_ETH_HLEN : ETH_HLEN);
1920 }
1921 return skb;
1922}
1923
1924/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001925static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001926 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001927 struct ib_mac_iocb_rsp *ib_mac_rsp,
1928 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929{
1930 struct net_device *ndev = qdev->ndev;
1931 struct sk_buff *skb = NULL;
1932
1933 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936 if (unlikely(!skb)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001939 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001940 return;
1941 }
1942
Ron Mercera32959c2009-06-09 05:39:27 +00001943 /* Frame error, so drop the packet. */
1944 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001945 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00001946 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001947 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001948 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001949 return;
1950 }
Ron Mercerec33a492009-06-09 05:39:28 +00001951
1952 /* The max framesize filter on this chip is set higher than
1953 * MTU since FCoE uses 2k frames.
1954 */
1955 if (skb->len > ndev->mtu + ETH_HLEN) {
1956 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001957 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001958 return;
1959 }
1960
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001961 /* loopback self test for ethtool */
1962 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963 ql_check_lb_frame(qdev, skb);
1964 dev_kfree_skb_any(skb);
1965 return;
1966 }
1967
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001968 prefetch(skb->data);
1969 skb->dev = ndev;
1970 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001971 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001978 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001979 }
1980 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001981 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001983 }
Ron Mercerd555f592009-03-09 10:59:19 +00001984
Ron Mercerd555f592009-03-09 10:59:19 +00001985 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001986 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001987
1988 /* If rx checksum is on, and there are no
1989 * csum or frame errors.
1990 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001991 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001992 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1993 /* TCP frame. */
1994 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001995 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001997 skb->ip_summed = CHECKSUM_UNNECESSARY;
1998 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000 /* Unfragmented ipv4 UDP frame. */
2001 struct iphdr *iph = (struct iphdr *) skb->data;
2002 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002003 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002004 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00002005 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002007 }
2008 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002009 }
Ron Mercerd555f592009-03-09 10:59:19 +00002010
Ron Mercer885ee392009-11-03 13:49:31 +00002011 rx_ring->rx_packets++;
2012 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002013 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002014 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015 __vlan_hwaccel_put_tag(skb, vlan_id);
2016 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017 napi_gro_receive(&rx_ring->napi, skb);
2018 else
2019 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002020}
2021
Ron Mercer4f848c02010-01-02 10:37:43 +00002022/* Process an inbound completion from an rx ring. */
2023static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024 struct rx_ring *rx_ring,
2025 struct ib_mac_iocb_rsp *ib_mac_rsp)
2026{
2027 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
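	/* 0xffff acts as a "no VLAN tag present" sentinel; the page/skb
	 * receive paths above compare vlan_id against it before calling
	 * __vlan_hwaccel_put_tag().
	 */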
2031
2032 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2033
2034 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035 /* The data and headers are split into
2036 * separate buffers.
2037 */
2038 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2039 vlan_id);
2040 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041 /* The data fit in a single small buffer.
2042 * Allocate a new skb, copy the data and
2043 * return the buffer to the free pool.
2044 */
2045 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2046 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002047 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050 /* TCP packet in a page chunk that's been checksummed.
2051 * Tack it on to our GRO skb and let it go.
2052 */
2053 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2054 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002055 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056 /* Non-TCP packet in a page chunk. Allocate an
2057 * skb, tack it on frags, and send it up.
2058 */
2059 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2060 length, vlan_id);
2061 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002062 /* Non-TCP/UDP large frames that span multiple buffers
2063	 * can be processed correctly by the split frame logic.
2064 */
2065 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002067 }
2068
2069 return (unsigned long)length;
2070}
2071
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002072/* Process an outbound completion from an rx ring. */
2073static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 struct ob_mac_iocb_rsp *mac_rsp)
2075{
2076 struct tx_ring *tx_ring;
2077 struct tx_ring_desc *tx_ring_desc;
2078
2079 QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002083 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002085 dev_kfree_skb(tx_ring_desc->skb);
2086 tx_ring_desc->skb = NULL;
2087
2088 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089 OB_MAC_IOCB_RSP_S |
2090 OB_MAC_IOCB_RSP_L |
2091 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002093 netif_warn(qdev, tx_done, qdev->ndev,
2094 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002095 }
2096 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002097 netif_warn(qdev, tx_done, qdev->ndev,
2098 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002099 }
2100 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002101 netif_warn(qdev, tx_done, qdev->ndev,
2102 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002103 }
2104 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002105 netif_warn(qdev, tx_done, qdev->ndev,
2106 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002107 }
2108 }
2109 atomic_inc(&tx_ring->tx_count);
2110}
2111
2112/* Fire up a handler to reset the MPI processor. */
2113void ql_queue_fw_error(struct ql_adapter *qdev)
2114{
Ron Mercer6a473302009-07-02 06:06:12 +00002115 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002116 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117}
2118
2119void ql_queue_asic_error(struct ql_adapter *qdev)
2120{
Ron Mercer6a473302009-07-02 06:06:12 +00002121 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002123 /* Clear adapter up bit to signal the recovery
2124 * process that it shouldn't kill the reset worker
2125 * thread
2126 */
2127 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002128	/* Set the asic recovery bit to tell the reset process that we are
 2129	 * in fatal error recovery rather than a normal close
2130 */
2131 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002132 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2133}
2134
2135static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136 struct ib_ae_iocb_rsp *ib_ae_rsp)
2137{
2138 switch (ib_ae_rsp->event) {
2139 case MGMT_ERR_EVENT:
Joe Perchesae9540f2010-02-09 11:49:52 +00002140 netif_err(qdev, rx_err, qdev->ndev,
2141 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002142 ql_queue_fw_error(qdev);
2143 return;
2144
2145 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002146 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002148 ql_queue_asic_error(qdev);
2149 return;
2150
2151 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002152 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002153 ql_queue_asic_error(qdev);
2154 break;
2155
2156 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002157 netdev_err(qdev->ndev, "PCI error occurred when reading "
2158 "anonymous buffers from rx_ring %d.\n",
2159 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002160 ql_queue_asic_error(qdev);
2161 break;
2162
2163 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002164 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2165 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 ql_queue_asic_error(qdev);
2167 break;
2168 }
2169}
2170
2171static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2172{
2173 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002174 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002175 struct ob_mac_iocb_rsp *net_rsp = NULL;
2176 int count = 0;
2177
Ron Mercer1e213302009-03-09 10:59:21 +00002178 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002179 /* While there are entries in the completion queue. */
2180 while (prod != rx_ring->cnsmr_idx) {
2181
Joe Perchesae9540f2010-02-09 11:49:52 +00002182 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2184 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002185
2186 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2187 rmb();
2188 switch (net_rsp->opcode) {
2189
2190 case OPCODE_OB_MAC_TSO_IOCB:
2191 case OPCODE_OB_MAC_IOCB:
2192 ql_process_mac_tx_intr(qdev, net_rsp);
2193 break;
2194 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002195 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002198 }
2199 count++;
2200 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002201 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002202 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002203 if (!net_rsp)
2204 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002205 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002206 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002207 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002208 if (atomic_read(&tx_ring->queue_stopped) &&
2209 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2210 /*
2211 * The queue got stopped because the tx_ring was full.
2212 * Wake it up, because it's now at least 25% empty.
2213 */
Ron Mercer1e213302009-03-09 10:59:21 +00002214 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002215 }
2216
2217 return count;
2218}
2219
2220static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2221{
2222 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002223 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002224 struct ql_net_rsp_iocb *net_rsp;
2225 int count = 0;
2226
2227 /* While there are entries in the completion queue. */
2228 while (prod != rx_ring->cnsmr_idx) {
2229
Joe Perchesae9540f2010-02-09 11:49:52 +00002230 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2232 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002233
2234 net_rsp = rx_ring->curr_entry;
2235 rmb();
2236 switch (net_rsp->opcode) {
2237 case OPCODE_IB_MAC_IOCB:
2238 ql_process_mac_rx_intr(qdev, rx_ring,
2239 (struct ib_mac_iocb_rsp *)
2240 net_rsp);
2241 break;
2242
2243 case OPCODE_IB_AE_IOCB:
2244 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2245 net_rsp);
2246 break;
2247 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002248 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250 net_rsp->opcode);
2251 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002252 }
2253 count++;
2254 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002255 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002256 if (count == budget)
2257 break;
2258 }
2259 ql_update_buffer_queues(qdev, rx_ring);
2260 ql_write_cq_idx(rx_ring);
2261 return count;
2262}
2263
2264static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2265{
2266 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002268 struct rx_ring *trx_ring;
2269 int i, work_done = 0;
2270 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002271
Joe Perchesae9540f2010-02-09 11:49:52 +00002272 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002274
Ron Mercer39aa8162009-08-27 11:02:11 +00002275 /* Service the TX rings first. They start
2276 * right after the RSS rings. */
2277 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278 trx_ring = &qdev->rx_ring[i];
2279 /* If this TX completion ring belongs to this vector and
2280 * it's not empty then service it.
2281 */
2282 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002285 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286 "%s: Servicing TX completion ring %d.\n",
2287 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002288 ql_clean_outbound_rx_ring(trx_ring);
2289 }
2290 }
2291
2292 /*
2293 * Now service the RSS ring if it's active.
2294 */
2295 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296 rx_ring->cnsmr_idx) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002297 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298 "%s: Servicing RX completion ring %d.\n",
2299 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002300 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2301 }
2302
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002303 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002304 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002305 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306 }
2307 return work_done;
2308}
2309
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002310static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002311{
2312 struct ql_adapter *qdev = netdev_priv(ndev);
2313
Jiri Pirko18c49b92011-07-21 03:24:11 +00002314 if (features & NETIF_F_HW_VLAN_RX) {
2315 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00002316 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002317 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002318 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002319 } else {
Jiri Pirko18c49b92011-07-21 03:24:11 +00002320 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00002321 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002322 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323 }
2324}
2325
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002326static netdev_features_t qlge_fix_features(struct net_device *ndev,
2327 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002328{
2329 /*
2330 * Since there is no support for separate rx/tx vlan accel
2331	 * enable/disable, make sure the tx flag is always in the same state as rx.
2332 */
2333 if (features & NETIF_F_HW_VLAN_RX)
2334 features |= NETIF_F_HW_VLAN_TX;
2335 else
2336 features &= ~NETIF_F_HW_VLAN_TX;
2337
2338 return features;
2339}
2340
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002341static int qlge_set_features(struct net_device *ndev,
2342 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002343{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002344 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002345
2346 if (changed & NETIF_F_HW_VLAN_RX)
2347 qlge_vlan_mode(ndev, features);
2348
2349 return 0;
2350}
2351
Jiri Pirko8e586132011-12-08 19:52:37 -05002352static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002353{
2354 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002355 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002356
Jiri Pirko8e586132011-12-08 19:52:37 -05002357 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2358 MAC_ADDR_TYPE_VLAN, vid);
2359 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002360 netif_err(qdev, ifup, qdev->ndev,
2361 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002362 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002363}
2364
Jiri Pirko8e586132011-12-08 19:52:37 -05002365static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002366{
2367 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002368 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002369 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002370
Ron Mercercc288f52009-02-23 10:42:14 +00002371 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2372 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002373 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002374
Jiri Pirko8e586132011-12-08 19:52:37 -05002375 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002376 set_bit(vid, qdev->active_vlans);
2377
Ron Mercercc288f52009-02-23 10:42:14 +00002378 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002379
2380 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002381}
2382
Jiri Pirko8e586132011-12-08 19:52:37 -05002383static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002385 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002386 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002387
Jiri Pirko8e586132011-12-08 19:52:37 -05002388 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2389 MAC_ADDR_TYPE_VLAN, vid);
2390 if (err)
Joe Perchesae9540f2010-02-09 11:49:52 +00002391 netif_err(qdev, ifup, qdev->ndev,
2392 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002393 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002394}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002395
Jiri Pirko8e586132011-12-08 19:52:37 -05002396static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002397{
2398 struct ql_adapter *qdev = netdev_priv(ndev);
2399 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002400 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002401
2402 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2403 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002404 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002405
Jiri Pirko8e586132011-12-08 19:52:37 -05002406 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002407 clear_bit(vid, qdev->active_vlans);
2408
2409 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002410
2411 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002412}
2413
Ron Mercerc1b60092010-10-27 04:58:12 +00002414static void qlge_restore_vlan(struct ql_adapter *qdev)
2415{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002416 int status;
2417 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002418
Jiri Pirko18c49b92011-07-21 03:24:11 +00002419 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2420 if (status)
2421 return;
2422
2423 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2424 __qlge_vlan_rx_add_vid(qdev, vid);
2425
2426 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002427}
2428
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002429/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2430static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2431{
2432 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002433 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002434 return IRQ_HANDLED;
2435}
2436
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002437/* This handles a fatal error, MPI activity, and the default
2438 * rx_ring in an MSI-X multiple vector environment.
2439	 * In an MSI/Legacy environment it also processes the rest of
2440 * the rx_rings.
2441 */
2442static irqreturn_t qlge_isr(int irq, void *dev_id)
2443{
2444 struct rx_ring *rx_ring = dev_id;
2445 struct ql_adapter *qdev = rx_ring->qdev;
2446 struct intr_context *intr_context = &qdev->intr_context[0];
2447 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002448 int work_done = 0;
2449
Ron Mercerbb0d2152008-10-20 10:30:26 -07002450 spin_lock(&qdev->hw_lock);
2451 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002452 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2453 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002454 spin_unlock(&qdev->hw_lock);
2455 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002456 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002457 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002458
Ron Mercerbb0d2152008-10-20 10:30:26 -07002459 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002460
2461 /*
2462 * Check for fatal error.
2463 */
2464 if (var & STS_FE) {
2465 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002466 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002467 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002468 netdev_err(qdev->ndev, "Resetting chip. "
2469 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002470 return IRQ_HANDLED;
2471 }
2472
2473 /*
2474 * Check MPI processor activity.
2475 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002476 if ((var & STS_PI) &&
2477 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002478 /*
2479 * We've got an async event or mailbox completion.
2480 * Handle it and clear the source of the interrupt.
2481 */
Joe Perchesae9540f2010-02-09 11:49:52 +00002482 netif_err(qdev, intr, qdev->ndev,
2483 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002484 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002485 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2486 queue_delayed_work_on(smp_processor_id(),
2487 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002488 work_done++;
2489 }
2490
2491 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002492 * Get the bit-mask that shows the active queues for this
2493 * pass. Compare it to the queues that this irq services
2494 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002495 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002496 var = ql_read32(qdev, ISR1);
2497 if (var & intr_context->irq_mask) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002498 netif_info(qdev, intr, qdev->ndev,
2499 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002500 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002501 napi_schedule(&rx_ring->napi);
2502 work_done++;
2503 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002504 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002505 return work_done ? IRQ_HANDLED : IRQ_NONE;
2506}
2507
2508static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2509{
2510
2511 if (skb_is_gso(skb)) {
2512 int err;
2513 if (skb_header_cloned(skb)) {
2514 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2515 if (err)
2516 return err;
2517 }
2518
2519 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2520 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2521 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2522 mac_iocb_ptr->total_hdrs_len =
2523 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2524 mac_iocb_ptr->net_trans_offset =
2525 cpu_to_le16(skb_network_offset(skb) |
2526 skb_transport_offset(skb)
2527 << OB_MAC_TRANSPORT_HDR_SHIFT);
2528 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2529 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2530 if (likely(skb->protocol == htons(ETH_P_IP))) {
2531 struct iphdr *iph = ip_hdr(skb);
2532 iph->check = 0;
2533 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2534 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2535 iph->daddr, 0,
2536 IPPROTO_TCP,
2537 0);
2538 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2539 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2540 tcp_hdr(skb)->check =
2541 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2542 &ipv6_hdr(skb)->daddr,
2543 0, IPPROTO_TCP, 0);
2544 }
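		/* In either case the TCP checksum field now holds the
		 * pseudo-header seed (computed with a length of 0) so the
		 * hardware can finish the per-segment checksum as it slices
		 * the TSO frame.
		 */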
2545 return 1;
2546 }
2547 return 0;
2548}
2549
2550static void ql_hw_csum_setup(struct sk_buff *skb,
2551 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2552{
2553 int len;
2554 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002555 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002556 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2557 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2558 mac_iocb_ptr->net_trans_offset =
2559 cpu_to_le16(skb_network_offset(skb) |
2560 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2561
2562 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2563 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2564 if (likely(iph->protocol == IPPROTO_TCP)) {
2565 check = &(tcp_hdr(skb)->check);
2566 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2567 mac_iocb_ptr->total_hdrs_len =
2568 cpu_to_le16(skb_transport_offset(skb) +
2569 (tcp_hdr(skb)->doff << 2));
2570 } else {
2571 check = &(udp_hdr(skb)->check);
2572 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2573 mac_iocb_ptr->total_hdrs_len =
2574 cpu_to_le16(skb_transport_offset(skb) +
2575 sizeof(struct udphdr));
2576 }
2577 *check = ~csum_tcpudp_magic(iph->saddr,
2578 iph->daddr, len, iph->protocol, 0);
2579}
2580
Stephen Hemminger613573252009-08-31 19:50:58 +00002581static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002582{
2583 struct tx_ring_desc *tx_ring_desc;
2584 struct ob_mac_iocb_req *mac_iocb_ptr;
2585 struct ql_adapter *qdev = netdev_priv(ndev);
2586 int tso;
2587 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002588 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002589
2590 tx_ring = &qdev->tx_ring[tx_ring_idx];
2591
Ron Mercer74c50b42009-03-09 10:59:27 +00002592 if (skb_padto(skb, ETH_ZLEN))
2593 return NETDEV_TX_OK;
2594
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002595 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002596 netif_info(qdev, tx_queued, qdev->ndev,
2597 "%s: shutting down tx queue %d du to lack of resources.\n",
2598 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002599 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002600 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002601 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002602 return NETDEV_TX_BUSY;
2603 }
2604 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2605 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002606 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002607
2608 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2609 mac_iocb_ptr->tid = tx_ring_desc->index;
2610 /* We use the upper 32-bits to store the tx queue for this IO.
2611 * When we get the completion we can use it to establish the context.
2612 */
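	/* ql_process_mac_tx_intr() reads this back from mac_rsp->txq_idx to
	 * find the right tx_ring when the completion comes in.
	 */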
2613 mac_iocb_ptr->txq_idx = tx_ring_idx;
2614 tx_ring_desc->skb = skb;
2615
2616 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2617
Jesse Grosseab6d182010-10-20 13:56:03 +00002618 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002619 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2620 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002621 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2622 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2623 }
2624 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2625 if (tso < 0) {
2626 dev_kfree_skb_any(skb);
2627 return NETDEV_TX_OK;
2628 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2629 ql_hw_csum_setup(skb,
2630 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2631 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002632 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2633 NETDEV_TX_OK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002634 netif_err(qdev, tx_queued, qdev->ndev,
2635 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002636 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002637 return NETDEV_TX_BUSY;
2638 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002639 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2640 tx_ring->prod_idx++;
2641 if (tx_ring->prod_idx == tx_ring->wq_len)
2642 tx_ring->prod_idx = 0;
2643 wmb();
2644
2645 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f2010-02-09 11:49:52 +00002646 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2647 "tx queued, slot %d, len %d\n",
2648 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002649
2650 atomic_dec(&tx_ring->tx_count);
2651 return NETDEV_TX_OK;
2652}
2653
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002654
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002655static void ql_free_shadow_space(struct ql_adapter *qdev)
2656{
2657 if (qdev->rx_ring_shadow_reg_area) {
2658 pci_free_consistent(qdev->pdev,
2659 PAGE_SIZE,
2660 qdev->rx_ring_shadow_reg_area,
2661 qdev->rx_ring_shadow_reg_dma);
2662 qdev->rx_ring_shadow_reg_area = NULL;
2663 }
2664 if (qdev->tx_ring_shadow_reg_area) {
2665 pci_free_consistent(qdev->pdev,
2666 PAGE_SIZE,
2667 qdev->tx_ring_shadow_reg_area,
2668 qdev->tx_ring_shadow_reg_dma);
2669 qdev->tx_ring_shadow_reg_area = NULL;
2670 }
2671}
2672
2673static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2674{
2675 qdev->rx_ring_shadow_reg_area =
2676 pci_alloc_consistent(qdev->pdev,
2677 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2678 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002679 netif_err(qdev, ifup, qdev->ndev,
2680 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002681 return -ENOMEM;
2682 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002683 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002684 qdev->tx_ring_shadow_reg_area =
2685 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2686 &qdev->tx_ring_shadow_reg_dma);
2687 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002688 netif_err(qdev, ifup, qdev->ndev,
2689 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002690 goto err_wqp_sh_area;
2691 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002692 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002693 return 0;
2694
2695err_wqp_sh_area:
2696 pci_free_consistent(qdev->pdev,
2697 PAGE_SIZE,
2698 qdev->rx_ring_shadow_reg_area,
2699 qdev->rx_ring_shadow_reg_dma);
2700 return -ENOMEM;
2701}
2702
2703static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704{
2705 struct tx_ring_desc *tx_ring_desc;
2706 int i;
2707 struct ob_mac_iocb_req *mac_iocb_ptr;
2708
2709 mac_iocb_ptr = tx_ring->wq_base;
2710 tx_ring_desc = tx_ring->q;
2711 for (i = 0; i < tx_ring->wq_len; i++) {
2712 tx_ring_desc->index = i;
2713 tx_ring_desc->skb = NULL;
2714 tx_ring_desc->queue_entry = mac_iocb_ptr;
2715 mac_iocb_ptr++;
2716 tx_ring_desc++;
2717 }
2718 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2719 atomic_set(&tx_ring->queue_stopped, 0);
2720}
2721
2722static void ql_free_tx_resources(struct ql_adapter *qdev,
2723 struct tx_ring *tx_ring)
2724{
2725 if (tx_ring->wq_base) {
2726 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2727 tx_ring->wq_base, tx_ring->wq_base_dma);
2728 tx_ring->wq_base = NULL;
2729 }
2730 kfree(tx_ring->q);
2731 tx_ring->q = NULL;
2732}
2733
2734static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2735 struct tx_ring *tx_ring)
2736{
2737 tx_ring->wq_base =
2738 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2739 &tx_ring->wq_base_dma);
2740
Joe Perches8e95a202009-12-03 07:58:21 +00002741 if ((tx_ring->wq_base == NULL) ||
2742 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002743 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002744 return -ENOMEM;
2745 }
2746 tx_ring->q =
2747 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2748 if (tx_ring->q == NULL)
2749 goto err;
2750
2751 return 0;
2752err:
2753 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2754 tx_ring->wq_base, tx_ring->wq_base_dma);
2755 return -ENOMEM;
2756}
2757
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002758static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002759{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002760 struct bq_desc *lbq_desc;
2761
Ron Mercer7c734352009-10-19 03:32:19 +00002762 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002763
Ron Mercer7c734352009-10-19 03:32:19 +00002764 curr_idx = rx_ring->lbq_curr_idx;
2765 clean_idx = rx_ring->lbq_clean_idx;
2766 while (curr_idx != clean_idx) {
2767 lbq_desc = &rx_ring->lbq[curr_idx];
2768
2769 if (lbq_desc->p.pg_chunk.last_flag) {
2770 pci_unmap_page(qdev->pdev,
2771 lbq_desc->p.pg_chunk.map,
2772 ql_lbq_block_size(qdev),
2773 PCI_DMA_FROMDEVICE);
2774 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002775 }
Ron Mercer7c734352009-10-19 03:32:19 +00002776
2777 put_page(lbq_desc->p.pg_chunk.page);
2778 lbq_desc->p.pg_chunk.page = NULL;
2779
2780 if (++curr_idx == rx_ring->lbq_len)
2781 curr_idx = 0;
2782
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002783 }
2784}
2785
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002786static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002787{
2788 int i;
2789 struct bq_desc *sbq_desc;
2790
2791 for (i = 0; i < rx_ring->sbq_len; i++) {
2792 sbq_desc = &rx_ring->sbq[i];
2793 if (sbq_desc == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002794 netif_err(qdev, ifup, qdev->ndev,
2795 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002796 return;
2797 }
2798 if (sbq_desc->p.skb) {
2799 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002800 dma_unmap_addr(sbq_desc, mapaddr),
2801 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002802 PCI_DMA_FROMDEVICE);
2803 dev_kfree_skb(sbq_desc->p.skb);
2804 sbq_desc->p.skb = NULL;
2805 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002806 }
2807}
2808
Ron Mercer4545a3f2009-02-23 10:42:17 +00002809/* Free all large and small rx buffers associated
2810 * with the completion queues for this device.
2811 */
2812static void ql_free_rx_buffers(struct ql_adapter *qdev)
2813{
2814 int i;
2815 struct rx_ring *rx_ring;
2816
2817 for (i = 0; i < qdev->rx_ring_count; i++) {
2818 rx_ring = &qdev->rx_ring[i];
2819 if (rx_ring->lbq)
2820 ql_free_lbq_buffers(qdev, rx_ring);
2821 if (rx_ring->sbq)
2822 ql_free_sbq_buffers(qdev, rx_ring);
2823 }
2824}
2825
2826static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2827{
2828 struct rx_ring *rx_ring;
2829 int i;
2830
2831 for (i = 0; i < qdev->rx_ring_count; i++) {
2832 rx_ring = &qdev->rx_ring[i];
2833 if (rx_ring->type != TX_Q)
2834 ql_update_buffer_queues(qdev, rx_ring);
2835 }
2836}
2837
2838static void ql_init_lbq_ring(struct ql_adapter *qdev,
2839 struct rx_ring *rx_ring)
2840{
2841 int i;
2842 struct bq_desc *lbq_desc;
2843 __le64 *bq = rx_ring->lbq_base;
2844
2845 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2846 for (i = 0; i < rx_ring->lbq_len; i++) {
2847 lbq_desc = &rx_ring->lbq[i];
2848 memset(lbq_desc, 0, sizeof(*lbq_desc));
2849 lbq_desc->index = i;
2850 lbq_desc->addr = bq;
2851 bq++;
2852 }
2853}
2854
2855static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002856 struct rx_ring *rx_ring)
2857{
2858 int i;
2859 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002860 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002861
Ron Mercer4545a3f2009-02-23 10:42:17 +00002862 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002863 for (i = 0; i < rx_ring->sbq_len; i++) {
2864 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002865 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002866 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002867 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002868 bq++;
2869 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002870}
2871
2872static void ql_free_rx_resources(struct ql_adapter *qdev,
2873 struct rx_ring *rx_ring)
2874{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002875 /* Free the small buffer queue. */
2876 if (rx_ring->sbq_base) {
2877 pci_free_consistent(qdev->pdev,
2878 rx_ring->sbq_size,
2879 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2880 rx_ring->sbq_base = NULL;
2881 }
2882
2883 /* Free the small buffer queue control blocks. */
2884 kfree(rx_ring->sbq);
2885 rx_ring->sbq = NULL;
2886
2887 /* Free the large buffer queue. */
2888 if (rx_ring->lbq_base) {
2889 pci_free_consistent(qdev->pdev,
2890 rx_ring->lbq_size,
2891 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2892 rx_ring->lbq_base = NULL;
2893 }
2894
2895 /* Free the large buffer queue control blocks. */
2896 kfree(rx_ring->lbq);
2897 rx_ring->lbq = NULL;
2898
2899 /* Free the rx queue. */
2900 if (rx_ring->cq_base) {
2901 pci_free_consistent(qdev->pdev,
2902 rx_ring->cq_size,
2903 rx_ring->cq_base, rx_ring->cq_base_dma);
2904 rx_ring->cq_base = NULL;
2905 }
2906}
2907
2908/* Allocate queues and buffers for this completion queue based
2909 * on the values in the parameter structure. */
2910static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2911 struct rx_ring *rx_ring)
2912{
2913
2914 /*
2915 * Allocate the completion queue for this rx_ring.
2916 */
2917 rx_ring->cq_base =
2918 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2919 &rx_ring->cq_base_dma);
2920
2921 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002922 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002923 return -ENOMEM;
2924 }
2925
2926 if (rx_ring->sbq_len) {
2927 /*
2928 * Allocate small buffer queue.
2929 */
2930 rx_ring->sbq_base =
2931 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2932 &rx_ring->sbq_base_dma);
2933
2934 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002935 netif_err(qdev, ifup, qdev->ndev,
2936 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002937 goto err_mem;
2938 }
2939
2940 /*
2941 * Allocate small buffer queue control blocks.
2942 */
2943 rx_ring->sbq =
2944 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2945 GFP_KERNEL);
2946 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002947 netif_err(qdev, ifup, qdev->ndev,
2948 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002949 goto err_mem;
2950 }
2951
Ron Mercer4545a3f2009-02-23 10:42:17 +00002952 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002953 }
2954
2955 if (rx_ring->lbq_len) {
2956 /*
2957 * Allocate large buffer queue.
2958 */
2959 rx_ring->lbq_base =
2960 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2961 &rx_ring->lbq_base_dma);
2962
2963 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002964 netif_err(qdev, ifup, qdev->ndev,
2965 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002966 goto err_mem;
2967 }
2968 /*
2969 * Allocate large buffer queue control blocks.
2970 */
2971 rx_ring->lbq =
2972 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2973 GFP_KERNEL);
2974 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002975 netif_err(qdev, ifup, qdev->ndev,
2976 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002977 goto err_mem;
2978 }
2979
Ron Mercer4545a3f2009-02-23 10:42:17 +00002980 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002981 }
2982
2983 return 0;
2984
2985err_mem:
2986 ql_free_rx_resources(qdev, rx_ring);
2987 return -ENOMEM;
2988}
2989
2990static void ql_tx_ring_clean(struct ql_adapter *qdev)
2991{
2992 struct tx_ring *tx_ring;
2993 struct tx_ring_desc *tx_ring_desc;
2994 int i, j;
2995
2996 /*
2997 * Loop through all queues and free
2998 * any resources.
2999 */
3000 for (j = 0; j < qdev->tx_ring_count; j++) {
3001 tx_ring = &qdev->tx_ring[j];
3002 for (i = 0; i < tx_ring->wq_len; i++) {
3003 tx_ring_desc = &tx_ring->q[i];
3004 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003005 netif_err(qdev, ifdown, qdev->ndev,
3006 "Freeing lost SKB %p, from queue %d, index %d.\n",
3007 tx_ring_desc->skb, j,
3008 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003009 ql_unmap_send(qdev, tx_ring_desc,
3010 tx_ring_desc->map_cnt);
3011 dev_kfree_skb(tx_ring_desc->skb);
3012 tx_ring_desc->skb = NULL;
3013 }
3014 }
3015 }
3016}
3017
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003018static void ql_free_mem_resources(struct ql_adapter *qdev)
3019{
3020 int i;
3021
3022 for (i = 0; i < qdev->tx_ring_count; i++)
3023 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3024 for (i = 0; i < qdev->rx_ring_count; i++)
3025 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3026 ql_free_shadow_space(qdev);
3027}
3028
3029static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3030{
3031 int i;
3032
3033 /* Allocate space for our shadow registers and such. */
3034 if (ql_alloc_shadow_space(qdev))
3035 return -ENOMEM;
3036
3037 for (i = 0; i < qdev->rx_ring_count; i++) {
3038 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003039 netif_err(qdev, ifup, qdev->ndev,
3040 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003041 goto err_mem;
3042 }
3043 }
3044 /* Allocate tx queue resources */
3045 for (i = 0; i < qdev->tx_ring_count; i++) {
3046 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003047 netif_err(qdev, ifup, qdev->ndev,
3048 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003049 goto err_mem;
3050 }
3051 }
3052 return 0;
3053
3054err_mem:
3055 ql_free_mem_resources(qdev);
3056 return -ENOMEM;
3057}
3058
3059/* Set up the rx ring control block and pass it to the chip.
3060 * The control block is defined as
3061 * "Completion Queue Initialization Control Block", or cqicb.
3062 */
3063static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3064{
3065 struct cqicb *cqicb = &rx_ring->cqicb;
3066 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003067 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003068 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003069 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003070 void __iomem *doorbell_area =
3071 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3072 int err = 0;
3073 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003074 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003075 __le64 *base_indirect_ptr;
3076 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003077
3078 /* Set up the shadow registers for this ring. */
3079 rx_ring->prod_idx_sh_reg = shadow_reg;
3080 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003081 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003082 shadow_reg += sizeof(u64);
3083 shadow_reg_dma += sizeof(u64);
3084 rx_ring->lbq_base_indirect = shadow_reg;
3085 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003086 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3087 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003088 rx_ring->sbq_base_indirect = shadow_reg;
3089 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3090
3091 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003092 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003093 rx_ring->cnsmr_idx = 0;
3094 rx_ring->curr_entry = rx_ring->cq_base;
3095
3096 /* PCI doorbell mem area + 0x04 for valid register */
3097 rx_ring->valid_db_reg = doorbell_area + 0x04;
3098
 3099	/* PCI doorbell mem area + 0x18 for large buffer producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003100 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003101
 3102	/* PCI doorbell mem area + 0x1c for small buffer producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003103 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003104
3105 memset((void *)cqicb, 0, sizeof(struct cqicb));
3106 cqicb->msix_vect = rx_ring->irq;
3107
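	/* The length fields below are only 16 bits wide, so the maximum
	 * queue length of 65536 is encoded as 0.
	 */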
Ron Mercer459caf52009-01-04 17:08:11 -08003108 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3109 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003110
Ron Mercer97345522009-01-09 11:31:50 +00003111 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003112
Ron Mercer97345522009-01-09 11:31:50 +00003113 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003114
3115 /*
3116 * Set up the control block load flags.
3117 */
3118 cqicb->flags = FLAGS_LC | /* Load queue base address */
3119 FLAGS_LV | /* Load MSI-X vector */
3120 FLAGS_LI; /* Load irq delay values */
3121 if (rx_ring->lbq_len) {
3122 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003123 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003124 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003125 page_entries = 0;
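		/* Record the DMA address of each DB_PAGE_SIZE chunk of the
		 * large buffer queue in the indirect page list.
		 */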
3126 do {
3127 *base_indirect_ptr = cpu_to_le64(tmp);
3128 tmp += DB_PAGE_SIZE;
3129 base_indirect_ptr++;
3130 page_entries++;
3131 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003132 cqicb->lbq_addr =
3133 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003134 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3135 (u16) rx_ring->lbq_buf_size;
3136 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3137 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3138 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003139 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003140 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003141 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003142 rx_ring->lbq_clean_idx = 0;
3143 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003144 }
3145 if (rx_ring->sbq_len) {
3146 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003147 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003148 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003149 page_entries = 0;
3150 do {
3151 *base_indirect_ptr = cpu_to_le64(tmp);
3152 tmp += DB_PAGE_SIZE;
3153 base_indirect_ptr++;
3154 page_entries++;
3155 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003156 cqicb->sbq_addr =
3157 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003158 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003159 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003160 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3161 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003162 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003163 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003164 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003165 rx_ring->sbq_clean_idx = 0;
3166 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003167 }
3168 switch (rx_ring->type) {
3169 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003170 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3171 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3172 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003173 case RX_Q:
3174 /* Inbound completion handling rx_rings run in
3175 * separate NAPI contexts.
3176 */
3177 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3178 64);
3179 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3180 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3181 break;
3182 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00003183 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3184 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003185 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003186 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3187 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3189 CFG_LCQ, rx_ring->cq_id);
3190 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003191 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003192 return err;
3193 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003194 return err;
3195}
3196
3197static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3198{
3199 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3200 void __iomem *doorbell_area =
3201 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3202 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3203 (tx_ring->wq_id * sizeof(u64));
3204 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3205 (tx_ring->wq_id * sizeof(u64));
3206 int err = 0;
3207
3208 /*
3209 * Assign doorbell registers for this tx_ring.
3210 */
3211 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003212 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003213 tx_ring->prod_idx = 0;
3214 /* TX PCI doorbell mem area + 0x04 */
3215 tx_ring->valid_db_reg = doorbell_area + 0x04;
3216
3217 /*
3218 * Assign shadow registers for this tx_ring.
3219 */
3220 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3221 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3222
3223 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3224 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3225 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3226 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3227 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003228 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003229
Ron Mercer97345522009-01-09 11:31:50 +00003230 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003231
3232 ql_init_tx_ring(qdev, tx_ring);
3233
Ron Mercere3324712009-07-02 06:06:13 +00003234 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003235 (u16) tx_ring->wq_id);
3236 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003237 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003238 return err;
3239 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003240 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3241 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003242 return err;
3243}
3244
3245static void ql_disable_msix(struct ql_adapter *qdev)
3246{
3247 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3248 pci_disable_msix(qdev->pdev);
3249 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3250 kfree(qdev->msi_x_entry);
3251 qdev->msi_x_entry = NULL;
3252 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3253 pci_disable_msi(qdev->pdev);
3254 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3255 }
3256}
3257
Ron Mercera4ab6132009-08-27 11:02:10 +00003258/* We start by trying to get the number of vectors
3259 * stored in qdev->intr_count. If we don't get that
3260 * many then we reduce the count and try again.
3261 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003262static void ql_enable_msix(struct ql_adapter *qdev)
3263{
Ron Mercera4ab6132009-08-27 11:02:10 +00003264 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003265
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003266 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003267 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003268 /* Try to alloc space for the msix struct,
3269 * if it fails then go to MSI/legacy.
3270 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003271 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003272 sizeof(struct msix_entry),
3273 GFP_KERNEL);
3274 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003275 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003276 goto msi;
3277 }
3278
Ron Mercera4ab6132009-08-27 11:02:10 +00003279 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003280 qdev->msi_x_entry[i].entry = i;
3281
Ron Mercera4ab6132009-08-27 11:02:10 +00003282 /* Loop to get our vectors. We start with
3283 * what we want and settle for what we get.
3284 */
3285 do {
3286 err = pci_enable_msix(qdev->pdev,
3287 qdev->msi_x_entry, qdev->intr_count);
3288 if (err > 0)
3289 qdev->intr_count = err;
3290 } while (err > 0);
3291
3292 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003293 kfree(qdev->msi_x_entry);
3294 qdev->msi_x_entry = NULL;
Joe Perchesae9540f2010-02-09 11:49:52 +00003295 netif_warn(qdev, ifup, qdev->ndev,
3296 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003297 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003298 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003299 } else if (err == 0) {
3300 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f2010-02-09 11:49:52 +00003301 netif_info(qdev, ifup, qdev->ndev,
3302 "MSI-X Enabled, got %d vectors.\n",
3303 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003304 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003305 }
3306 }
3307msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003308 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003309 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003310 if (!pci_enable_msi(qdev->pdev)) {
3311 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f2010-02-09 11:49:52 +00003312 netif_info(qdev, ifup, qdev->ndev,
3313 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003314 return;
3315 }
3316 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003317 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f2010-02-09 11:49:52 +00003318 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3319 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003320}
3321
Ron Mercer39aa8162009-08-27 11:02:11 +00003322/* Each vector services 1 RSS ring and 1 or more
3323 * TX completion rings. This function loops through
3324 * the TX completion rings and assigns the vector that
3325 * will service it. An example would be if there are
3326 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3327 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003328 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003329 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3330 */
3331static void ql_set_tx_vect(struct ql_adapter *qdev)
3332{
3333 int i, j, vect;
3334 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3335
3336 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3337 /* Assign irq vectors to TX rx_rings.*/
3338 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3339 i < qdev->rx_ring_count; i++) {
3340 if (j == tx_rings_per_vector) {
3341 vect++;
3342 j = 0;
3343 }
3344 qdev->rx_ring[i].irq = vect;
3345 j++;
3346 }
3347 } else {
 3348		/* For a single vector all rings have an irq
3349 * of zero.
3350 */
3351 for (i = 0; i < qdev->rx_ring_count; i++)
3352 qdev->rx_ring[i].irq = 0;
3353 }
3354}
3355
3356/* Set the interrupt mask for this vector. Each vector
3357 * will service 1 RSS ring and 1 or more TX completion
3358 * rings. This function sets up a bit mask per vector
3359 * that indicates which rings it services.
3360 */
3361static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3362{
3363 int j, vect = ctx->intr;
3364 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3365
3366 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3367 /* Add the RSS ring serviced by this vector
3368 * to the mask.
3369 */
3370 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3371 /* Add the TX ring(s) serviced by this vector
3372 * to the mask. */
3373 for (j = 0; j < tx_rings_per_vector; j++) {
3374 ctx->irq_mask |=
3375 (1 << qdev->rx_ring[qdev->rss_ring_count +
3376 (vect * tx_rings_per_vector) + j].cq_id);
3377 }
3378 } else {
 3379		/* For a single vector we just shift each queue's
3380 * ID into the mask.
3381 */
3382 for (j = 0; j < qdev->rx_ring_count; j++)
3383 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3384 }
3385}
3386
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003387/*
3388 * Here we build the intr_context structures based on
3389 * our rx_ring count and intr vector count.
3390 * The intr_context structure is used to hook each vector
3391 * to possibly different handlers.
3392 */
3393static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3394{
3395 int i = 0;
3396 struct intr_context *intr_context = &qdev->intr_context[0];
3397
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003398 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3399		/* Each rx_ring has its
3400 * own intr_context since we have separate
3401 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003402 */
3403 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3404 qdev->rx_ring[i].irq = i;
3405 intr_context->intr = i;
3406 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003407 /* Set up this vector's bit-mask that indicates
3408 * which queues it services.
3409 */
3410 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003411 /*
 3412			 * We set up each vector's enable/disable/read bits so
3413 * there's no bit/mask calculations in the critical path.
3414 */
3415 intr_context->intr_en_mask =
3416 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3418 | i;
3419 intr_context->intr_dis_mask =
3420 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3421 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3422 INTR_EN_IHD | i;
3423 intr_context->intr_read_mask =
3424 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3425 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3426 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003427 if (i == 0) {
3428 /* The first vector/queue handles
3429 * broadcast/multicast, fatal errors,
3430 * and firmware events. This in addition
3431 * to normal inbound NAPI processing.
3432 */
3433 intr_context->handler = qlge_isr;
3434 sprintf(intr_context->name, "%s-rx-%d",
3435 qdev->ndev->name, i);
3436 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003437 /*
3438 * Inbound queues handle unicast frames only.
3439 */
3440 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003441 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003442 qdev->ndev->name, i);
3443 }
3444 }
3445 } else {
3446 /*
3447 * All rx_rings use the same intr_context since
3448 * there is only one vector.
3449 */
3450 intr_context->intr = 0;
3451 intr_context->qdev = qdev;
3452 /*
 3453		 * We set up each vector's enable/disable/read bits so
3454 * there's no bit/mask calculations in the critical path.
3455 */
3456 intr_context->intr_en_mask =
3457 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3458 intr_context->intr_dis_mask =
3459 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3460 INTR_EN_TYPE_DISABLE;
3461 intr_context->intr_read_mask =
3462 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3463 /*
3464 * Single interrupt means one handler for all rings.
3465 */
3466 intr_context->handler = qlge_isr;
3467 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003468 /* Set up this vector's bit-mask that indicates
3469 * which queues it services. In this case there is
3470 * a single vector so it will service all RSS and
3471 * TX completion rings.
3472 */
3473 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003474 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003475 /* Tell the TX completion rings which MSIx vector
3476 * they will be using.
3477 */
3478 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003479}
3480
3481static void ql_free_irq(struct ql_adapter *qdev)
3482{
3483 int i;
3484 struct intr_context *intr_context = &qdev->intr_context[0];
3485
3486 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3487 if (intr_context->hooked) {
3488 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3489 free_irq(qdev->msi_x_entry[i].vector,
3490 &qdev->rx_ring[i]);
Joe Perchesae9540f2010-02-09 11:49:52 +00003491 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3492 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003493 } else {
3494 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f2010-02-09 11:49:52 +00003495 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3496 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003497 }
3498 }
3499 }
3500 ql_disable_msix(qdev);
3501}
3502
3503static int ql_request_irq(struct ql_adapter *qdev)
3504{
3505 int i;
3506 int status = 0;
3507 struct pci_dev *pdev = qdev->pdev;
3508 struct intr_context *intr_context = &qdev->intr_context[0];
3509
3510 ql_resolve_queues_to_irqs(qdev);
3511
3512 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3513 atomic_set(&intr_context->irq_cnt, 0);
3514 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3515 status = request_irq(qdev->msi_x_entry[i].vector,
3516 intr_context->handler,
3517 0,
3518 intr_context->name,
3519 &qdev->rx_ring[i]);
3520 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003521 netif_err(qdev, ifup, qdev->ndev,
3522 "Failed request for MSIX interrupt %d.\n",
3523 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003524 goto err_irq;
3525 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00003526 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3527 "Hooked intr %d, queue type %s, with name %s.\n",
3528 i,
3529 qdev->rx_ring[i].type == DEFAULT_Q ?
3530 "DEFAULT_Q" :
3531 qdev->rx_ring[i].type == TX_Q ?
3532 "TX_Q" :
3533 qdev->rx_ring[i].type == RX_Q ?
3534 "RX_Q" : "",
3535 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003536 }
3537 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00003538 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3539 "trying msi or legacy interrupts.\n");
3540 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3541 "%s: irq = %d.\n", __func__, pdev->irq);
3542 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3543 "%s: context->name = %s.\n", __func__,
3544 intr_context->name);
3545 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3546 "%s: dev_id = 0x%p.\n", __func__,
3547 &qdev->rx_ring[0]);
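			/* A legacy interrupt line may be shared with other
			 * devices; MSI interrupts are never shared.
			 */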
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003548 status =
3549 request_irq(pdev->irq, qlge_isr,
3550 test_bit(QL_MSI_ENABLED,
3551 &qdev->
3552 flags) ? 0 : IRQF_SHARED,
3553 intr_context->name, &qdev->rx_ring[0]);
3554 if (status)
3555 goto err_irq;
3556
Joe Perchesae9540f2010-02-09 11:49:52 +00003557 netif_err(qdev, ifup, qdev->ndev,
3558 "Hooked intr %d, queue type %s, with name %s.\n",
3559 i,
3560 qdev->rx_ring[0].type == DEFAULT_Q ?
3561 "DEFAULT_Q" :
3562 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3563 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3564 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565 }
3566 intr_context->hooked = 1;
3567 }
3568 return status;
3569err_irq:
Joe Perchesae9540f2010-02-09 11:49:52 +00003570	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003571 ql_free_irq(qdev);
3572 return status;
3573}
3574
3575static int ql_start_rss(struct ql_adapter *qdev)
3576{
Joe Perches215faf92010-12-21 02:16:10 -08003577 static const u8 init_hash_seed[] = {
3578 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3579 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3580 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3581 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3582 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3583 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003584 struct ricb *ricb = &qdev->ricb;
3585 int status = 0;
3586 int i;
3587 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3588
Ron Mercere3324712009-07-02 06:06:13 +00003589 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003590
Ron Mercerb2014ff2009-08-27 11:02:09 +00003591 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003592 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003593 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3594 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003595
3596 /*
3597 * Fill out the Indirection Table.
3598 */
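	/* Note: masking with (rss_ring_count - 1) spreads the 1024 entries
	 * across all rings only when rss_ring_count is a power of two.
	 */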
Ron Mercer541ae282009-10-08 09:54:37 +00003599 for (i = 0; i < 1024; i++)
3600 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003601
Ron Mercer541ae282009-10-08 09:54:37 +00003602 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3603 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003604
Joe Perchesae9540f2010-02-09 11:49:52 +00003605 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003606
Ron Mercere3324712009-07-02 06:06:13 +00003607 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003608 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003609 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003610 return status;
3611 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003612 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3613 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003614 return status;
3615}
3616
Ron Mercera5f59dc2009-07-02 06:06:07 +00003617static int ql_clear_routing_entries(struct ql_adapter *qdev)
3618{
3619 int i, status = 0;
3620
3621 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3622 if (status)
3623 return status;
3624 /* Clear all the entries in the routing table. */
3625 for (i = 0; i < 16; i++) {
3626 status = ql_set_routing_reg(qdev, i, 0, 0);
3627 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003628 netif_err(qdev, ifup, qdev->ndev,
3629 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003630 break;
3631 }
3632 }
3633 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3634 return status;
3635}
3636
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003637/* Initialize the frame-to-queue routing. */
3638static int ql_route_initialize(struct ql_adapter *qdev)
3639{
3640 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003641
3642 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003643 status = ql_clear_routing_entries(qdev);
3644 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003645 return status;
3646
3647 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3648 if (status)
3649 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003650
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003651 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3652 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003653 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003654 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003655 "Failed to init routing register "
3656 "for IP CSUM error packets.\n");
3657 goto exit;
3658 }
3659 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3660 RT_IDX_TU_CSUM_ERR, 1);
3661 if (status) {
3662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to init routing register "
3664 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003665 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003666 }
3667 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3668 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003669 netif_err(qdev, ifup, qdev->ndev,
3670 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003671 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003672 }
3673 /* If we have more than one inbound queue, then turn on RSS in the
3674 * routing block.
3675 */
3676 if (qdev->rss_ring_count > 1) {
3677 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3678 RT_IDX_RSS_MATCH, 1);
3679 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003680 netif_err(qdev, ifup, qdev->ndev,
3681 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003682 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003683 }
3684 }
3685
3686 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3687 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003688 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003689 netif_err(qdev, ifup, qdev->ndev,
3690 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003691exit:
3692 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003693 return status;
3694}
3695
Ron Mercer2ee1e272009-03-03 12:10:33 +00003696int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003697{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003698 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003699
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003700	/* Check if the link is up and use that to
 3701	 * determine whether we are setting or clearing
3702 * the MAC address in the CAM.
3703 */
3704 set = ql_read32(qdev, STS);
3705 set &= qdev->port_link_up;
3706 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003707 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003708 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003709 return status;
3710 }
3711
3712 status = ql_route_initialize(qdev);
3713 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003714 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003715
3716 return status;
3717}
3718
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003719static int ql_adapter_initialize(struct ql_adapter *qdev)
3720{
3721 u32 value, mask;
3722 int i;
3723 int status = 0;
3724
3725 /*
3726 * Set up the System register to halt on errors.
3727 */
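	/* For these registers the upper 16 bits act as a write mask that
	 * selects which of the lower 16 bits are actually updated.
	 */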
3728 value = SYS_EFE | SYS_FAE;
3729 mask = value << 16;
3730 ql_write32(qdev, SYS, mask | value);
3731
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003732 /* Set the default queue, and VLAN behavior. */
3733 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3734 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003735 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3736
3737 /* Set the MPI interrupt to enabled. */
3738 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3739
3740 /* Enable the function, set pagesize, enable error checking. */
3741 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003742 FSC_EC | FSC_VM_PAGE_4K;
3743 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003744
3745 /* Set/clear header splitting. */
3746 mask = FSC_VM_PAGESIZE_MASK |
3747 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3748 ql_write32(qdev, FSC, mask | value);
3749
Ron Mercer572c5262010-01-02 10:37:42 +00003750 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003751
Ron Mercera3b71932009-10-08 09:54:38 +00003752 /* Set RX packet routing to use port/pci function on which the
 3753	 * packet arrived, in addition to the usual frame routing.
3754 * This is helpful on bonding where both interfaces can have
3755 * the same MAC address.
3756 */
3757 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003758 /* Reroute all packets to our Interface.
3759 * They may have been routed to MPI firmware
3760 * due to WOL.
3761 */
3762 value = ql_read32(qdev, MGMT_RCV_CFG);
3763 value &= ~MGMT_RCV_CFG_RM;
3764 mask = 0xffff0000;
3765
3766 /* Sticky reg needs clearing due to WOL. */
3767 ql_write32(qdev, MGMT_RCV_CFG, mask);
3768 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3769
 3770	/* Default WOL is enabled on Mezz cards */
3771 if (qdev->pdev->subsystem_device == 0x0068 ||
3772 qdev->pdev->subsystem_device == 0x0180)
3773 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003774
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003775 /* Start up the rx queues. */
3776 for (i = 0; i < qdev->rx_ring_count; i++) {
3777 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3778 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003779 netif_err(qdev, ifup, qdev->ndev,
3780 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003781 return status;
3782 }
3783 }
3784
3785 /* If there is more than one inbound completion queue
3786 * then download a RICB to configure RSS.
3787 */
3788 if (qdev->rss_ring_count > 1) {
3789 status = ql_start_rss(qdev);
3790 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003791 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003792 return status;
3793 }
3794 }
3795
3796 /* Start up the tx queues. */
3797 for (i = 0; i < qdev->tx_ring_count; i++) {
3798 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3799 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003800 netif_err(qdev, ifup, qdev->ndev,
3801 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003802 return status;
3803 }
3804 }
3805
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003806 /* Initialize the port and set the max framesize. */
3807 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003808 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003809 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003810
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003811 /* Set up the MAC address and frame routing filter. */
3812 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003814 netif_err(qdev, ifup, qdev->ndev,
3815 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003816 return status;
3817 }
3818
3819 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003820 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003821 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3822 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003823 napi_enable(&qdev->rx_ring[i].napi);
3824 }
3825
3826 return status;
3827}
3828
3829/* Issue soft reset to chip. */
3830static int ql_adapter_reset(struct ql_adapter *qdev)
3831{
3832 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003833 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003834 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003835
Ron Mercera5f59dc2009-07-02 06:06:07 +00003836 /* Clear all the entries in the routing table. */
3837 status = ql_clear_routing_entries(qdev);
3838 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003839 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003840 return status;
3841 }
3842
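	/* Allow roughly 30 usec, but at least one jiffy, for the
	 * function reset bit to clear below.
	 */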
3843 end_jiffies = jiffies +
3844 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003845
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003846	/* If the recovery bit is set, skip the mailbox command and
 3847	 * just clear the bit; otherwise this is a normal reset.
3848 */
3849 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3850 /* Stop management traffic. */
3851 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003852
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003853 /* Wait for the NIC and MGMNT FIFOs to empty. */
3854 ql_wait_fifo_empty(qdev);
3855 } else
3856 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003857
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003858 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003859
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003860 do {
3861 value = ql_read32(qdev, RST_FO);
3862 if ((value & RST_FO_FR) == 0)
3863 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003864 cpu_relax();
3865 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003866
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003867 if (value & RST_FO_FR) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003868 netif_err(qdev, ifdown, qdev->ndev,
3869 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003870 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003871 }
3872
Ron Mercer84087f42009-10-08 09:54:41 +00003873 /* Resume management traffic. */
3874 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003875 return status;
3876}
3877
3878static void ql_display_dev_info(struct net_device *ndev)
3879{
Joe Perchesb16fed02010-11-15 11:12:28 +00003880 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003881
Joe Perchesae9540f2010-02-09 11:49:52 +00003882 netif_info(qdev, probe, qdev->ndev,
3883 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3884 "XG Roll = %d, XG Rev = %d.\n",
3885 qdev->func,
3886 qdev->port,
3887 qdev->chip_rev_id & 0x0000000f,
3888 qdev->chip_rev_id >> 4 & 0x0000000f,
3889 qdev->chip_rev_id >> 8 & 0x0000000f,
3890 qdev->chip_rev_id >> 12 & 0x0000000f);
3891 netif_info(qdev, probe, qdev->ndev,
3892 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003893}
3894
stephen hemmingerac409212010-10-21 07:50:54 +00003895static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003896{
3897 int status = 0;
3898 u32 wol = MB_WOL_DISABLE;
3899
3900 /* The CAM is still intact after a reset, but if we
3901 * are doing WOL, then we may need to program the
3902 * routing regs. We would also need to issue the mailbox
3903 * commands to instruct the MPI what to do per the ethtool
3904 * settings.
3905 */
3906
3907 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3908 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003909 netif_err(qdev, ifdown, qdev->ndev,
3910 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3911 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003912 return -EINVAL;
3913 }
3914
3915 if (qdev->wol & WAKE_MAGIC) {
3916 status = ql_mb_wol_set_magic(qdev, 1);
3917 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003918 netif_err(qdev, ifdown, qdev->ndev,
3919 "Failed to set magic packet on %s.\n",
3920 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003921 return status;
3922 } else
Joe Perchesae9540f2010-02-09 11:49:52 +00003923 netif_info(qdev, drv, qdev->ndev,
3924 "Enabled magic packet successfully on %s.\n",
3925 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003926
3927 wol |= MB_WOL_MAGIC_PKT;
3928 }
3929
3930 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003931 wol |= MB_WOL_MODE_ON;
3932 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f2010-02-09 11:49:52 +00003933 netif_err(qdev, drv, qdev->ndev,
3934 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003935 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f2010-02-09 11:49:52 +00003936 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003937 }
3938
3939 return status;
3940}
3941
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003942static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003943{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003944
Ron Mercer6497b602009-02-12 16:37:13 -08003945 /* Don't kill the reset worker thread if we
3946 * are in the process of recovery.
3947 */
3948 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3949 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003950 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3951 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003952 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003953 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003954 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003955}
3956
3957static int ql_adapter_down(struct ql_adapter *qdev)
3958{
3959 int i, status = 0;
3960
3961 ql_link_off(qdev);
3962
3963 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003964
Ron Mercer39aa8162009-08-27 11:02:11 +00003965 for (i = 0; i < qdev->rss_ring_count; i++)
3966 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003967
3968 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3969
3970 ql_disable_interrupts(qdev);
3971
3972 ql_tx_ring_clean(qdev);
3973
Ron Mercer6b318cb2009-03-09 10:59:26 +00003974	/* Call netif_napi_del() from a common point.
3975 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003976 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003977 netif_napi_del(&qdev->rx_ring[i].napi);
3978
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003979 status = ql_adapter_reset(qdev);
3980 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003981 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3982 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003983 ql_free_rx_buffers(qdev);
3984
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003985 return status;
3986}
3987
3988static int ql_adapter_up(struct ql_adapter *qdev)
3989{
3990 int err = 0;
3991
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003992 err = ql_adapter_initialize(qdev);
3993 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003994 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003995 goto err_init;
3996 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003997 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003998 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003999 /* If the port is initialized and the
 4000	 * link is up then turn on the carrier.
4001 */
4002 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4003 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00004004 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00004005 /* Restore rx mode. */
4006 clear_bit(QL_ALLMULTI, &qdev->flags);
4007 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4008 qlge_set_multicast_list(qdev->ndev);
4009
Ron Mercerc1b60092010-10-27 04:58:12 +00004010 /* Restore vlan setting. */
4011 qlge_restore_vlan(qdev);
4012
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004013 ql_enable_interrupts(qdev);
4014 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004015 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004016
4017 return 0;
4018err_init:
4019 ql_adapter_reset(qdev);
4020 return err;
4021}
4022
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004023static void ql_release_adapter_resources(struct ql_adapter *qdev)
4024{
4025 ql_free_mem_resources(qdev);
4026 ql_free_irq(qdev);
4027}
4028
4029static int ql_get_adapter_resources(struct ql_adapter *qdev)
4030{
4031 int status = 0;
4032
4033 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004034 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004035 return -ENOMEM;
4036 }
4037 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004038 return status;
4039}
4040
4041static int qlge_close(struct net_device *ndev)
4042{
4043 struct ql_adapter *qdev = netdev_priv(ndev);
4044
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004045	/* If we hit the pci_channel_io_perm_failure
 4046	 * condition, then we already
4047 * brought the adapter down.
4048 */
4049 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004050 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004051 clear_bit(QL_EEH_FATAL, &qdev->flags);
4052 return 0;
4053 }
4054
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004055 /*
4056 * Wait for device to recover from a reset.
4057 * (Rarely happens, but possible.)
4058 */
4059 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4060 msleep(1);
4061 ql_adapter_down(qdev);
4062 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004063 return 0;
4064}
4065
4066static int ql_configure_rings(struct ql_adapter *qdev)
4067{
4068 int i;
4069 struct rx_ring *rx_ring;
4070 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004071 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
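	/* Jumbo MTUs (above 1500 bytes) use the larger receive buffer size. */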
Ron Mercer7c734352009-10-19 03:32:19 +00004072 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4073 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4074
4075 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004076
Ron Mercera4ab6132009-08-27 11:02:10 +00004077 /* In a perfect world we have one RSS ring for each CPU
 4078	 * and each has its own vector. To do that we ask for
4079 * cpu_cnt vectors. ql_enable_msix() will adjust the
4080 * vector count to what we actually get. We then
4081 * allocate an RSS ring for each.
4082 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004083 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004084 qdev->intr_count = cpu_cnt;
4085 ql_enable_msix(qdev);
4086 /* Adjust the RSS ring count to the actual vector count. */
4087 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004088 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004089 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004090
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004091 for (i = 0; i < qdev->tx_ring_count; i++) {
4092 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004093 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004094 tx_ring->qdev = qdev;
4095 tx_ring->wq_id = i;
4096 tx_ring->wq_len = qdev->tx_ring_size;
4097 tx_ring->wq_size =
4098 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4099
4100 /*
 4101		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004102 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004103 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004104 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004105 }
4106
4107 for (i = 0; i < qdev->rx_ring_count; i++) {
4108 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004109 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004110 rx_ring->qdev = qdev;
4111 rx_ring->cq_id = i;
4112 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004113 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004114 /*
4115 * Inbound (RSS) queues.
4116 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004117 rx_ring->cq_len = qdev->rx_ring_size;
4118 rx_ring->cq_size =
4119 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4120 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4121 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004122 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004123 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f2010-02-09 11:49:52 +00004124 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4125 "lbq_buf_size %d, order = %d\n",
4126 rx_ring->lbq_buf_size,
4127 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004128 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4129 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004130 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004131 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004132 rx_ring->type = RX_Q;
4133 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004134 /*
4135 * Outbound queue handles outbound completions only.
4136 */
4137 /* outbound cq is same size as tx_ring it services. */
4138 rx_ring->cq_len = qdev->tx_ring_size;
4139 rx_ring->cq_size =
4140 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4141 rx_ring->lbq_len = 0;
4142 rx_ring->lbq_size = 0;
4143 rx_ring->lbq_buf_size = 0;
4144 rx_ring->sbq_len = 0;
4145 rx_ring->sbq_size = 0;
4146 rx_ring->sbq_buf_size = 0;
4147 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004148 }
4149 }
4150 return 0;
4151}
4152
4153static int qlge_open(struct net_device *ndev)
4154{
4155 int err = 0;
4156 struct ql_adapter *qdev = netdev_priv(ndev);
4157
Ron Mercer74e12432009-11-11 12:54:04 +00004158 err = ql_adapter_reset(qdev);
4159 if (err)
4160 return err;
4161
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004162 err = ql_configure_rings(qdev);
4163 if (err)
4164 return err;
4165
4166 err = ql_get_adapter_resources(qdev);
4167 if (err)
4168 goto error_up;
4169
4170 err = ql_adapter_up(qdev);
4171 if (err)
4172 goto error_up;
4173
4174 return err;
4175
4176error_up:
4177 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004178 return err;
4179}
4180
Ron Mercer7c734352009-10-19 03:32:19 +00004181static int ql_change_rx_buffers(struct ql_adapter *qdev)
4182{
4183 struct rx_ring *rx_ring;
4184 int i, status;
4185 u32 lbq_buf_len;
4186
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004187 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004188 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4189 int i = 3;
4190 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004191 netif_err(qdev, ifup, qdev->ndev,
4192 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004193 ssleep(1);
4194 }
4195
4196 if (!i) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004197 netif_err(qdev, ifup, qdev->ndev,
4198 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004199 return -ETIMEDOUT;
4200 }
4201 }
4202
4203 status = ql_adapter_down(qdev);
4204 if (status)
4205 goto error;
4206
4207 /* Get the new rx buffer size. */
4208 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4209 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4210 qdev->lbq_buf_order = get_order(lbq_buf_len);
4211
4212 for (i = 0; i < qdev->rss_ring_count; i++) {
4213 rx_ring = &qdev->rx_ring[i];
4214 /* Set the new size. */
4215 rx_ring->lbq_buf_size = lbq_buf_len;
4216 }
4217
4218 status = ql_adapter_up(qdev);
4219 if (status)
4220 goto error;
4221
4222 return status;
4223error:
Joe Perchesae9540f2010-02-09 11:49:52 +00004224 netif_alert(qdev, ifup, qdev->ndev,
4225 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004226 set_bit(QL_ADAPTER_UP, &qdev->flags);
4227 dev_close(qdev->ndev);
4228 return status;
4229}
4230
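/* Only transitions between the standard (1500) and jumbo (9000) MTU are
 * supported.  If the interface is running, the rx buffers are rebuilt to
 * match the new frame size.
 */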
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004231static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4232{
4233 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004234 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004235
4236 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004237 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004238 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004239 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004240 } else
4241 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004242
4243 queue_delayed_work(qdev->workqueue,
4244 &qdev->mpi_port_cfg_work, 3*HZ);
4245
Breno Leitao746079d2010-02-04 10:11:19 +00004246 ndev->mtu = new_mtu;
4247
Ron Mercer7c734352009-10-19 03:32:19 +00004248 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004249 return 0;
4250 }
4251
Ron Mercer7c734352009-10-19 03:32:19 +00004252 status = ql_change_rx_buffers(qdev);
4253 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004254 netif_err(qdev, ifup, qdev->ndev,
4255 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004256 }
4257
4258 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004259}
4260
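/* Aggregate the per-ring rx/tx counters into the netdev stats block. */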
4261static struct net_device_stats *qlge_get_stats(struct net_device
4262 *ndev)
4263{
Ron Mercer885ee392009-11-03 13:49:31 +00004264 struct ql_adapter *qdev = netdev_priv(ndev);
4265 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4266 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4267 unsigned long pkts, mcast, dropped, errors, bytes;
4268 int i;
4269
4270 /* Get RX stats. */
4271 pkts = mcast = dropped = errors = bytes = 0;
4272 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4273 pkts += rx_ring->rx_packets;
4274 bytes += rx_ring->rx_bytes;
4275 dropped += rx_ring->rx_dropped;
4276 errors += rx_ring->rx_errors;
4277 mcast += rx_ring->rx_multicast;
4278 }
4279 ndev->stats.rx_packets = pkts;
4280 ndev->stats.rx_bytes = bytes;
4281 ndev->stats.rx_dropped = dropped;
4282 ndev->stats.rx_errors = errors;
4283 ndev->stats.multicast = mcast;
4284
4285 /* Get TX stats. */
4286 pkts = errors = bytes = 0;
4287 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4288 pkts += tx_ring->tx_packets;
4289 bytes += tx_ring->tx_bytes;
4290 errors += tx_ring->tx_errors;
4291 }
4292 ndev->stats.tx_packets = pkts;
4293 ndev->stats.tx_bytes = bytes;
4294 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004295 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004296}
4297
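/* Program the routing and MAC address registers to match the interface
 * flags: promiscuous, all-multicast (also used when the multicast list
 * exceeds MAX_MULTICAST_ENTRIES) or an explicit multicast match list.
 */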
stephen hemmingerac409212010-10-21 07:50:54 +00004298static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004299{
Joe Perchesb16fed02010-11-15 11:12:28 +00004300 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad2010-04-01 21:22:57 +00004301 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004302 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004303
Ron Mercercc288f52009-02-23 10:42:14 +00004304 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4305 if (status)
4306 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004307 /*
4308 * Set or clear promiscuous mode if a
4309 * transition is taking place.
4310 */
4311 if (ndev->flags & IFF_PROMISC) {
4312 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4313 if (ql_set_routing_reg
4314 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004315 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004316 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004317 } else {
4318 set_bit(QL_PROMISCUOUS, &qdev->flags);
4319 }
4320 }
4321 } else {
4322 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4323 if (ql_set_routing_reg
4324 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004325 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004326 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004327 } else {
4328 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4329 }
4330 }
4331 }
4332
4333 /*
4334 * Set or clear all multicast mode if a
4335 * transition is taking place.
4336 */
4337 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004338 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004339 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4340 if (ql_set_routing_reg
4341 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004342 netif_err(qdev, hw, qdev->ndev,
4343 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004344 } else {
4345 set_bit(QL_ALLMULTI, &qdev->flags);
4346 }
4347 }
4348 } else {
4349 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4350 if (ql_set_routing_reg
4351 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004352 netif_err(qdev, hw, qdev->ndev,
4353 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004354 } else {
4355 clear_bit(QL_ALLMULTI, &qdev->flags);
4356 }
4357 }
4358 }
4359
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004360 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004361 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4362 if (status)
4363 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004364 i = 0;
Jiri Pirko22bedad2010-04-01 21:22:57 +00004365 netdev_for_each_mc_addr(ha, ndev) {
4366 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004367 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004368 netif_err(qdev, hw, qdev->ndev,
4369 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004370 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004371 goto exit;
4372 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004373 i++;
4374 }
Ron Mercercc288f52009-02-23 10:42:14 +00004375 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004376 if (ql_set_routing_reg
4377 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004378 netif_err(qdev, hw, qdev->ndev,
4379 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004380 } else {
4381 set_bit(QL_ALLMULTI, &qdev->flags);
4382 }
4383 }
4384exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004385 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004386}
4387
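/* Set a new station address and program it into the CAM. */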
4388static int qlge_set_mac_address(struct net_device *ndev, void *p)
4389{
Joe Perchesb16fed02010-11-15 11:12:28 +00004390 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004391 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004392 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004393
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004394 if (!is_valid_ether_addr(addr->sa_data))
4395 return -EADDRNOTAVAIL;
4396 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004397 /* Update local copy of current mac address. */
4398 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004399
Ron Mercercc288f52009-02-23 10:42:14 +00004400 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4401 if (status)
4402 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004403 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4404 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004405 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00004406 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004407 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4408 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004409}
4410
4411static void qlge_tx_timeout(struct net_device *ndev)
4412{
Joe Perchesb16fed02010-11-15 11:12:28 +00004413 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004414 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004415}
4416
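/* Recovery worker queued after an ASIC error: cycle the adapter down and
 * back up and restore the rx mode.  If the cycle fails the device is
 * closed.
 */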
4417static void ql_asic_reset_work(struct work_struct *work)
4418{
4419 struct ql_adapter *qdev =
4420 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004421 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004422 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004423 status = ql_adapter_down(qdev);
4424 if (status)
4425 goto error;
4426
4427 status = ql_adapter_up(qdev);
4428 if (status)
4429 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004430
4431 /* Restore rx mode. */
4432 clear_bit(QL_ALLMULTI, &qdev->flags);
4433 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4434 qlge_set_multicast_list(qdev->ndev);
4435
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004436 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004437 return;
4438error:
Joe Perchesae9540f2010-02-09 11:49:52 +00004439 netif_alert(qdev, ifup, qdev->ndev,
4440 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004441
Ron Mercerdb988122009-03-09 10:59:17 +00004442 set_bit(QL_ADAPTER_UP, &qdev->flags);
4443 dev_close(qdev->ndev);
4444 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004445}
4446
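/* Per-ASIC hooks: the 8012 and 8000 parts differ in flash layout and in
 * the port initialization sequence.
 */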
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004447static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004448 .get_flash = ql_get_8012_flash_params,
4449 .port_initialize = ql_8012_port_initialize,
4450};
4451
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004452static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004453 .get_flash = ql_get_8000_flash_params,
4454 .port_initialize = ql_8000_port_initialize,
4455};
4456
Ron Mercere4552f52009-06-09 05:39:32 +00004457/* Find the pcie function number for the other NIC
4458 * on this chip. Since both NIC functions share a
4459 * common firmware we have the lowest enabled function
4460 * do any common work. Examples would be resetting
4461 * after a fatal firmware error, or doing a firmware
4462 * coredump.
4463 */
4464static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004465{
Ron Mercere4552f52009-06-09 05:39:32 +00004466 int status = 0;
4467 u32 temp;
4468 u32 nic_func1, nic_func2;
4469
4470 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4471 &temp);
4472 if (status)
4473 return status;
4474
4475 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4476 MPI_TEST_NIC_FUNC_MASK);
4477 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4478 MPI_TEST_NIC_FUNC_MASK);
4479
4480 if (qdev->func == nic_func1)
4481 qdev->alt_func = nic_func2;
4482 else if (qdev->func == nic_func2)
4483 qdev->alt_func = nic_func1;
4484 else
4485 status = -EIO;
4486
4487 return status;
4488}
4489
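/* Derive this function's identity from the STS register: PCI function,
 * alternate function, port, semaphore mask, mailbox addresses and the
 * device-specific nic_ops.
 */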
4490static int ql_get_board_info(struct ql_adapter *qdev)
4491{
4492 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004493 qdev->func =
4494 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004495 if (qdev->func > 3)
4496 return -EIO;
4497
4498 status = ql_get_alt_pcie_func(qdev);
4499 if (status)
4500 return status;
4501
4502 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4503 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004504 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4505 qdev->port_link_up = STS_PL1;
4506 qdev->port_init = STS_PI1;
4507 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4508 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4509 } else {
4510 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4511 qdev->port_link_up = STS_PL0;
4512 qdev->port_init = STS_PI0;
4513 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4514 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4515 }
4516 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004517 qdev->device_id = qdev->pdev->device;
4518 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4519 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004520 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4521 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004522 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004523}
4524
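/* Undo ql_init_device(): destroy the workqueue, unmap the register and
 * doorbell BARs, free the coredump buffer and release the PCI regions.
 */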
4525static void ql_release_all(struct pci_dev *pdev)
4526{
4527 struct net_device *ndev = pci_get_drvdata(pdev);
4528 struct ql_adapter *qdev = netdev_priv(ndev);
4529
4530 if (qdev->workqueue) {
4531 destroy_workqueue(qdev->workqueue);
4532 qdev->workqueue = NULL;
4533 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004534
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004535 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004536 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004537 if (qdev->doorbell_area)
4538 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004539 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004540 pci_release_regions(pdev);
4541 pci_set_drvdata(pdev, NULL);
4542}
4543
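/* One-time PCI and software setup at probe time: enable the device, set
 * up the DMA masks, map the register and doorbell BARs, read the flash
 * and initialize default ring sizes, coalescing parameters and work items.
 */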
4544static int __devinit ql_init_device(struct pci_dev *pdev,
4545 struct net_device *ndev, int cards_found)
4546{
4547 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004548 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004549
Ron Mercere3324712009-07-02 06:06:13 +00004550 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004551 err = pci_enable_device(pdev);
4552 if (err) {
4553 dev_err(&pdev->dev, "PCI device enable failed.\n");
4554 return err;
4555 }
4556
Ron Mercerebd6e772009-09-29 08:39:25 +00004557 qdev->ndev = ndev;
4558 qdev->pdev = pdev;
4559 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004560
Ron Mercerbc9167f2009-10-10 09:35:04 +00004561 /* Set PCIe read request size */
4562 err = pcie_set_readrq(pdev, 4096);
4563 if (err) {
4564 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004565 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004566 }
4567
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004568 err = pci_request_regions(pdev, DRV_NAME);
4569 if (err) {
4570 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004571 goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004572 }
4573
4574 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004575 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004576 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004577 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004578 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004579 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004580 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004581 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004582 }
4583
4584 if (err) {
4585 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004586 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004587 }
4588
Ron Mercer73475332009-11-06 07:44:58 +00004589 /* Set PCIe reset type for EEH to fundamental. */
4590 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004591 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004592 qdev->reg_base =
4593 ioremap_nocache(pci_resource_start(pdev, 1),
4594 pci_resource_len(pdev, 1));
4595 if (!qdev->reg_base) {
4596 dev_err(&pdev->dev, "Register mapping failed.\n");
4597 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004598 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004599 }
4600
4601 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4602 qdev->doorbell_area =
4603 ioremap_nocache(pci_resource_start(pdev, 3),
4604 pci_resource_len(pdev, 3));
4605 if (!qdev->doorbell_area) {
4606 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4607 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004608 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004609 }
4610
Ron Mercere4552f52009-06-09 05:39:32 +00004611 err = ql_get_board_info(qdev);
4612 if (err) {
4613 dev_err(&pdev->dev, "Register access failed.\n");
4614 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004615 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004616 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004617 qdev->msg_enable = netif_msg_init(debug, default_msg);
4618 spin_lock_init(&qdev->hw_lock);
4619 spin_lock_init(&qdev->stats_lock);
4620
Ron Mercer8aae2602010-01-15 13:31:28 +00004621 if (qlge_mpi_coredump) {
4622 qdev->mpi_coredump =
4623 vmalloc(sizeof(struct ql_mpi_coredump));
4624 if (qdev->mpi_coredump == NULL) {
4625 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4626 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004627 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004628 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004629 if (qlge_force_coredump)
4630 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004631 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004632 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004633 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004634 if (err) {
4635 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004636 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004637 }
4638
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004639 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004640 /* Keep local copy of current mac address. */
4641 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004642
4643 /* Set up the default ring sizes. */
4644 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4645 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4646
4647 /* Set up the coalescing parameters. */
4648 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4649 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4650 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4651 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4652
4653 /*
4654 * Set up the operating parameters.
4655 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004656 qdev->workqueue = create_singlethread_workqueue(ndev->name);
 if (!qdev->workqueue) {
 err = -ENOMEM;
 goto err_out2;
 }
4657 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4658 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4659 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004660 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004661 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004662 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004663 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004664 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004665
4666 if (!cards_found) {
4667 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4668 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4669 DRV_NAME, DRV_VERSION);
4670 }
4671 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004672err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004673 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004674err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004675 pci_disable_device(pdev);
4676 return err;
4677}
4678
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004679static const struct net_device_ops qlge_netdev_ops = {
4680 .ndo_open = qlge_open,
4681 .ndo_stop = qlge_close,
4682 .ndo_start_xmit = qlge_send,
4683 .ndo_change_mtu = qlge_change_mtu,
4684 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004685 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004686 .ndo_set_mac_address = qlge_set_mac_address,
4687 .ndo_validate_addr = eth_validate_addr,
4688 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004689 .ndo_fix_features = qlge_fix_features,
4690 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004691 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4692 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004693};
4694
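/* Periodic timer that reads the status register so a dead PCI bus is
 * detected (and EEH kicked off) even while the interface is idle.
 */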
Ron Mercer15c052f2010-02-04 13:32:46 -08004695static void ql_timer(unsigned long data)
4696{
4697 struct ql_adapter *qdev = (struct ql_adapter *)data;
4698 u32 var = 0;
4699
4700 var = ql_read32(qdev, STS);
4701 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004702 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004703 return;
4704 }
4705
Breno Leitao72046d82010-07-01 03:00:17 +00004706 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004707}
4708
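/* Called by the PCI core for each device matching qlge_pci_tbl: allocate
 * the multiqueue net_device, initialize the adapter, register the netdev
 * and start the EEH watchdog timer.
 */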
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004709static int __devinit qlge_probe(struct pci_dev *pdev,
4710 const struct pci_device_id *pci_entry)
4711{
4712 struct net_device *ndev = NULL;
4713 struct ql_adapter *qdev = NULL;
4714 static int cards_found;
4715 int err = 0;
4716
Ron Mercer1e213302009-03-09 10:59:21 +00004717 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4718 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004719 if (!ndev)
4720 return -ENOMEM;
4721
4722 err = ql_init_device(pdev, ndev, cards_found);
4723 if (err < 0) {
4724 free_netdev(ndev);
4725 return err;
4726 }
4727
4728 qdev = netdev_priv(ndev);
4729 SET_NETDEV_DEV(ndev, &pdev->dev);
Michał Mirosław88230fd2011-04-18 13:31:21 +00004730 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4731 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4732 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4733 ndev->features = ndev->hw_features |
4734 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004735
4736 if (test_bit(QL_DMA64, &qdev->flags))
4737 ndev->features |= NETIF_F_HIGHDMA;
4738
4739 /*
4740 * Set up net_device structure.
4741 */
4742 ndev->tx_queue_len = qdev->tx_ring_size;
4743 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004744
4745 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004746 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004747 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004748
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004749 err = register_netdev(ndev);
4750 if (err) {
4751 dev_err(&pdev->dev, "net device registration failed.\n");
4752 ql_release_all(pdev);
4753 pci_disable_device(pdev);
4754 return err;
4755 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004756 /* Start up the timer to trigger EEH if
4757 * the bus goes dead
4758 */
4759 init_timer_deferrable(&qdev->timer);
4760 qdev->timer.data = (unsigned long)qdev;
4761 qdev->timer.function = ql_timer;
4762 qdev->timer.expires = jiffies + (5*HZ);
4763 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004764 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004765 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004766 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004767 cards_found++;
4768 return 0;
4769}
4770
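/* Thin wrappers around the normal send and rx-clean paths, exported for
 * the loopback test code.
 */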
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004771netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4772{
4773 return qlge_send(skb, ndev);
4774}
4775
4776int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4777{
4778 return ql_clean_inbound_rx_ring(rx_ring, budget);
4779}
4780
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004781static void __devexit qlge_remove(struct pci_dev *pdev)
4782{
4783 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004784 struct ql_adapter *qdev = netdev_priv(ndev);
4785 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004786 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004787 unregister_netdev(ndev);
4788 ql_release_all(pdev);
4789 pci_disable_device(pdev);
4790 free_netdev(ndev);
4791}
4792
Ron Mercer6d190c62009-10-28 08:39:20 +00004793/* Clean up resources without touching hardware. */
4794static void ql_eeh_close(struct net_device *ndev)
4795{
4796 int i;
4797 struct ql_adapter *qdev = netdev_priv(ndev);
4798
4799 if (netif_carrier_ok(ndev)) {
4800 netif_carrier_off(ndev);
4801 netif_stop_queue(ndev);
4802 }
4803
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004804 /* Disabling the timer */
4805 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004806 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004807
4808 for (i = 0; i < qdev->rss_ring_count; i++)
4809 netif_napi_del(&qdev->rx_ring[i].napi);
4810
4811 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4812 ql_tx_ring_clean(qdev);
4813 ql_free_rx_buffers(qdev);
4814 ql_release_adapter_resources(qdev);
4815}
4816
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004817/*
4818 * This callback is called by the PCI subsystem whenever
4819 * a PCI bus error is detected.
4820 */
4821static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4822 enum pci_channel_state state)
4823{
4824 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004825 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004826
Ron Mercer6d190c62009-10-28 08:39:20 +00004827 switch (state) {
4828 case pci_channel_io_normal:
4829 return PCI_ERS_RESULT_CAN_RECOVER;
4830 case pci_channel_io_frozen:
4831 netif_device_detach(ndev);
4832 if (netif_running(ndev))
4833 ql_eeh_close(ndev);
4834 pci_disable_device(pdev);
4835 return PCI_ERS_RESULT_NEED_RESET;
4836 case pci_channel_io_perm_failure:
4837 dev_err(&pdev->dev,
4838 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004839 ql_eeh_close(ndev);
4840 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004841 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004842 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004843
4844 /* Request a slot reset. */
4845 return PCI_ERS_RESULT_NEED_RESET;
4846}
4847
4848/*
4849 * This callback is called after the PCI bus has been reset.
4850 * Basically, this tries to restart the card from scratch.
4851 * This is a shortened version of the device probe/discovery code,
4852 * it resembles the first half of the qlge_probe() routine.
4853 */
4854static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4855{
4856 struct net_device *ndev = pci_get_drvdata(pdev);
4857 struct ql_adapter *qdev = netdev_priv(ndev);
4858
Ron Mercer6d190c62009-10-28 08:39:20 +00004859 pdev->error_state = pci_channel_io_normal;
4860
4861 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004862 if (pci_enable_device(pdev)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004863 netif_err(qdev, ifup, qdev->ndev,
4864 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004865 return PCI_ERS_RESULT_DISCONNECT;
4866 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004867 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004868
4869 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004870 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004871 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004872 return PCI_ERS_RESULT_DISCONNECT;
4873 }
4874
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004875 return PCI_ERS_RESULT_RECOVERED;
4876}
4877
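/* Final step of EEH recovery: reopen the interface if it was running
 * before the error and re-attach the net device.
 */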
4878static void qlge_io_resume(struct pci_dev *pdev)
4879{
4880 struct net_device *ndev = pci_get_drvdata(pdev);
4881 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004882 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004883
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004884 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004885 err = qlge_open(ndev);
4886 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004887 netif_err(qdev, ifup, qdev->ndev,
4888 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004889 return;
4890 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004891 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00004892 netif_err(qdev, ifup, qdev->ndev,
4893 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004894 }
Breno Leitao72046d82010-07-01 03:00:17 +00004895 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004896 netif_device_attach(ndev);
4897}
4898
4899static struct pci_error_handlers qlge_err_handler = {
4900 .error_detected = qlge_io_error_detected,
4901 .slot_reset = qlge_io_slot_reset,
4902 .resume = qlge_io_resume,
4903};
4904
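/* Power management: bring the adapter down, configure wake-on-LAN via
 * ql_wol() and put the device into the requested low-power state.  Kept
 * outside CONFIG_PM because qlge_shutdown() reuses it.
 */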
4905static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4906{
4907 struct net_device *ndev = pci_get_drvdata(pdev);
4908 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004909 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004910
4911 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004912 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004913
4914 if (netif_running(ndev)) {
4915 err = ql_adapter_down(qdev);
4916 if (err)
4917 return err;
4918 }
4919
Ron Mercerbc083ce2009-10-21 11:07:40 +00004920 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004921 err = pci_save_state(pdev);
4922 if (err)
4923 return err;
4924
4925 pci_disable_device(pdev);
4926
4927 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4928
4929 return 0;
4930}
4931
David S. Miller04da2cf2008-09-19 16:14:24 -07004932#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004933static int qlge_resume(struct pci_dev *pdev)
4934{
4935 struct net_device *ndev = pci_get_drvdata(pdev);
4936 struct ql_adapter *qdev = netdev_priv(ndev);
4937 int err;
4938
4939 pci_set_power_state(pdev, PCI_D0);
4940 pci_restore_state(pdev);
4941 err = pci_enable_device(pdev);
4942 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004943 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004944 return err;
4945 }
4946 pci_set_master(pdev);
4947
4948 pci_enable_wake(pdev, PCI_D3hot, 0);
4949 pci_enable_wake(pdev, PCI_D3cold, 0);
4950
4951 if (netif_running(ndev)) {
4952 err = ql_adapter_up(qdev);
4953 if (err)
4954 return err;
4955 }
4956
Breno Leitao72046d82010-07-01 03:00:17 +00004957 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004958 netif_device_attach(ndev);
4959
4960 return 0;
4961}
David S. Miller04da2cf2008-09-19 16:14:24 -07004962#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004963
4964static void qlge_shutdown(struct pci_dev *pdev)
4965{
4966 qlge_suspend(pdev, PMSG_SUSPEND);
4967}
4968
4969static struct pci_driver qlge_driver = {
4970 .name = DRV_NAME,
4971 .id_table = qlge_pci_tbl,
4972 .probe = qlge_probe,
4973 .remove = __devexit_p(qlge_remove),
4974#ifdef CONFIG_PM
4975 .suspend = qlge_suspend,
4976 .resume = qlge_resume,
4977#endif
4978 .shutdown = qlge_shutdown,
4979 .err_handler = &qlge_err_handler
4980};
4981
4982static int __init qlge_init_module(void)
4983{
4984 return pci_register_driver(&qlge_driver);
4985}
4986
4987static void __exit qlge_exit(void)
4988{
4989 pci_unregister_driver(&qlge_driver);
4990}
4991
4992module_init(qlge_init_module);
4993module_exit(qlge_exit);