/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
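/* Returns 0 if the semaphore was acquired, non-zero if it is already held. */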
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

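/* Spin for up to 3ms (30 attempts, 100us apart) trying to acquire the
 * semaphore.
 */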
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used by kernel-thread contexts such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* still need to unmap the DMA area */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

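/* The upper 16 bits of an INTR_EN write act as a mask selecting which
 * of the lower bits to change; these two helpers gate the global
 * extended-interrupt (EI) bit.
 */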
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

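/* Counterpart to ql_enable_completion_interrupt().  Each disable bumps
 * irq_cnt, so the vector is only actually re-enabled once the count
 * drops back to zero.
 */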
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

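/* Check the flash signature against the expected string and verify that
 * the 16-bit words of the image sum to zero.  Returns 0 only when both
 * checks pass.
 */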
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

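/* Size in bytes of one master page allocation from which large
 * receive buffers are carved.
 */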
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

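/* Get the next large-buffer page chunk, sync it for CPU access, and
 * unmap the master page once its last chunk has been consumed.
 */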
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

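/* Carve the next buffer-sized chunk out of the master page, allocating
 * and DMA-mapping a fresh page when the previous one has been used up.
 */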
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx = %d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring: the frame arrived in a
 * large-buffer page chunk and is passed up through GRO.
 */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring: the frame arrived in a
 * large-buffer page chunk.
 */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too large, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
				length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

Michał Mirosław88230fd2011-04-18 13:31:21 +00001536 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001537 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1538 /* TCP frame. */
1539 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001540 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001542 skb->ip_summed = CHECKSUM_UNNECESSARY;
1543 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1544 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1545 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001546 struct iphdr *iph =
1547 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
Ron Mercer4f848c02010-01-02 10:37:43 +00001548 if (!(iph->frag_off &
1549 cpu_to_be16(IP_MF|IP_OFFSET))) {
1550 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001551 netif_printk(qdev, rx_status, KERN_DEBUG,
1552 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001553 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001554 }
1555 }
1556 }
1557
1558 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001559 if (vlan_id != 0xffff)
1560 __vlan_hwaccel_put_tag(skb, vlan_id);
1561 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1562 napi_gro_receive(napi, skb);
1563 else
1564 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001565 return;
1566err_out:
1567 dev_kfree_skb_any(skb);
1568 put_page(lbq_desc->p.pg_chunk.page);
1569}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}
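
/*
 * Editor's sketch (not part of the driver): ql_realign_skb() exists
 * because the chip wants small rx buffers padded to QLGE_SB_PAD while
 * the stack wants the IP header on a 4-byte boundary, which for a
 * 14-byte Ethernet header means reserving NET_IP_ALIGN (typically 2)
 * bytes. The usual allocation-side form of that convention:
 */
static struct sk_buff *example_alloc_aligned_rx_skb(struct net_device *ndev,
						    unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);

	/* Shift data by NET_IP_ALIGN so ETH_HLEN bytes of link-layer
	 * header leave the IP header naturally aligned. */
	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}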

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
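
/*
 * Editor's note (illustrative, not part of the driver): every page-chunk
 * path above ends with __pskb_pull_tail() for ETH_HLEN (VLAN_ETH_HLEN
 * when tagged), because eth_type_trans() and the protocol handlers read
 * the link-layer header from the linear area. The safe generic form of
 * that step:
 */
static bool example_linearize_eth_header(struct sk_buff *skb, bool tagged)
{
	unsigned int hlen = tagged ? VLAN_ETH_HLEN : ETH_HLEN;

	/* Copies from the page frags into the linear area only when the
	 * header is not already linear; false means a runt frame. */
	return pskb_may_pull(skb, hlen);
}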

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to tell the reset process that we
	 * are in fatal-error recovery rather than a normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
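
/*
 * Editor's sketch (not part of the driver): both clean routines follow
 * one pattern -- the chip DMAs its producer index to host memory (read
 * with ql_read_sh_reg()), the driver consumes entries until its consumer
 * index catches up, then publishes the new consumer index so the
 * hardware can reuse the slots. Skeleton form; example_handle() is
 * hypothetical:
 */
static int example_drain_cq(struct rx_ring *rx_ring, int budget)
{
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	int count = 0;

	while (prod != rx_ring->cnsmr_idx && count < budget) {
		rmb();		/* index read before entry contents */
		example_handle(rx_ring->curr_entry);	/* hypothetical */
		ql_update_cq(rx_ring);	/* advance cnsmr_idx/curr_entry */
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		count++;
	}
	ql_write_cq_idx(rx_ring);	/* tell the chip how far we got */
	return count;
}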

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
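
/*
 * Editor's sketch (not part of the driver): ql_napi_poll_msix() above
 * implements the standard NAPI contract -- do at most `budget` units of
 * rx work; only when the ring drained below budget may the handler call
 * napi_complete() and unmask the interrupt. Reduced to its core
 * (example_clean() is hypothetical):
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	int work_done = example_clean(rx_ring, budget);	/* hypothetical */

	if (work_done < budget) {
		/* Drained: leave polled mode, re-arm the interrupt. */
		napi_complete(napi);
		ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
	}
	return work_done;	/* == budget keeps us in polled mode */
}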

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure the tx flag is always in the same
	 * state as the rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
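
/*
 * Editor's note (illustrative, not part of the driver): the add/kill
 * handlers above mirror every filter update into qdev->active_vlans (a
 * 4096-bit bitmap declared with the rest of struct ql_adapter) precisely
 * so qlge_restore_vlan() can replay the set after a chip reset wipes the
 * hardware filter. The bookkeeping in isolation, with a hypothetical
 * standalone struct:
 */
struct example_vlan_state {
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);	/* 4096 bits */
};

static void example_vlan_track(struct example_vlan_state *st, u16 vid,
			       bool add)
{
	if (add)
		set_bit(vid, st->active_vlans);		/* ndo add_vid */
	else
		clear_bit(vid, st->active_vlans);	/* ndo kill_vid */
}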

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev,
			   "Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
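
/*
 * Editor's note (illustrative, not part of the driver): ql_tso() above
 * and ql_hw_csum_setup() below both seed the L4 checksum field with the
 * one's-complement pseudo-header sum and let the hardware fold in the
 * payload. For TSO the length argument is 0 because the chip inserts
 * the per-segment length itself. The TCP/IPv4 case in isolation:
 */
static void example_seed_pseudo_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	/* ~csum_tcpudp_magic(saddr, daddr, len, proto, 0) leaves the
	 * unfolded pseudo-header sum in the checksum field. */
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
}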

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
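
/*
 * Editor's sketch (not part of the driver): the tail of qlge_send()
 * shows the canonical descriptor-ring handoff -- advance and wrap the
 * software producer index, order the descriptor writes with wmb(), then
 * write the index to the doorbell so the chip starts fetching. In
 * isolation:
 */
static void example_kick_tx(struct tx_ring *tx_ring)
{
	if (++tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;	/* wrap at end of ring */
	wmb();	/* descriptor contents visible before the doorbell */
	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}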

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
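
/*
 * Editor's note (illustrative, not part of the driver): the shadow areas
 * above are coherent ("consistent") DMA memory -- the chip writes its
 * producer/consumer indices there and the CPU reads them without an
 * explicit sync, which is exactly what ql_read_sh_reg() depends on.
 * The alloc side of that pairing in isolation:
 */
static void *example_alloc_shadow_page(struct pci_dev *pdev, dma_addr_t *dma)
{
	void *shadow = pci_alloc_consistent(pdev, PAGE_SIZE, dma);

	if (shadow)
		memset(shadow, 0, PAGE_SIZE);	/* indices start at 0 */
	return shadow;	/* pair with pci_free_consistent() on teardown */
}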

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{

	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

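/* Reclaim skbs that are still mapped in the transmit work queues,
 * e.g. on the ifdown path (see ql_adapter_down below) while sends
 * were outstanding; each one is unmapped, logged as lost, and freed.
 */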
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
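/* A sketch of how the per-ring shadow register area is carved up
 * below, following this function's pointer arithmetic:
 *
 *	offset 0: completion queue producer index (u64)
 *	offset 8: lbq base-address indirection list
 *	offset 8 + 8 * MAX_DB_PAGES_PER_BQ(lbq_len):
 *		  sbq base-address indirection list
 */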
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
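/* Fallback order as implemented below: MSI-X degrades to MSI on
 * failure, and MSI in turn degrades to legacy INTx.  Note that
 * qlge_irq_type is a module-scope parameter, so a mode that fails
 * here stays disabled for later up/down cycles as well.
 */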
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more
 * TX completion rings.  This function sets up a bit mask
 * that indicates which rings it services.
 */
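/* Worked example, using the 2-vector/8-TX-ring case sketched above
 * ql_set_tx_vect (RSS rings at cq_id 0-1, TX completion rings at
 * cq_id 2-9): vector 1 ends up with
 *
 *	irq_mask = (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
 *
 * i.e. its own RSS ring plus TX completion rings 4,5,6 and 7.
 */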
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status = request_irq(pdev->irq, qlge_isr,
					     test_bit(QL_MSI_ENABLED,
						      &qdev->flags)
					     ? 0 : IRQF_SHARED,
					     intr_context->name,
					     &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

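/* Build and download the RICB that configures RSS.  The indirection
 * table fill below appears to assume rss_ring_count is a power of
 * two, since i & (count - 1) is only a true modulo for such counts:
 * with e.g. 4 RSS rings, hash_id[] holds the repeating pattern
 * 0,1,2,3,... so the ten hash bits admitted by the 0x3ff mask spread
 * flows evenly across the inbound completion queues.
 */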
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use the result to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

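/* Bring-up order below: global error/VLAN/MPI setup, function enable
 * with page size and header split, WOL-related rerouting, then the
 * per-ring CQICB/WQICB downloads, RSS (only when there is more than
 * one inbound queue), port init, the CAM/routing filter, and finally
 * NAPI enable on the RSS rings.
 */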
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which
	 * the packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

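/* Of the ethtool wake flags, only WAKE_MAGIC is honored below; ARP,
 * PHY, secure-magic and unicast/multicast/broadcast wake requests are
 * all rejected up front with -EINVAL.
 */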
static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

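/* Resulting rx_ring[] layout, illustrative for N RSS rings and M TX
 * completion rings (N == intr_count, M == cpu_cnt below):
 *
 *	rx_ring[0..N-1]		type RX_Q, cq_id 0..N-1      (inbound/RSS)
 *	rx_ring[N..N+M-1]	type TX_Q, cq_id N..N+M-1    (TX completions)
 */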
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

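/* Only the two MTUs the chip is run with here, 1500 and 9000, are
 * accepted; anything else returns -EINVAL.  A normal/jumbo flip on a
 * running interface costs a full down/up cycle (ql_change_rx_buffers
 * above) so the large-buffer size can be reprogrammed.
 */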
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}
4231
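/* Program the routing registers to match the interface's promiscuous
 * and multicast state.  Individual multicast addresses are loaded into
 * the hardware unless the list overflows MAX_MULTICAST_ENTRIES, in
 * which case the device falls back to all-multicast mode.
 */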
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

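/* Set a new station address and program it into the CAM registers. */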
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

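/* A transmit timeout is treated as an ASIC error; queueing it kicks the
 * reset worker, which recovers the device with a down/up cycle.
 */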
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

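/* Recover from an ASIC error by cycling the adapter down and back up
 * under the rtnl lock.  The promiscuous and all-multi flags are cleared
 * so that qlge_set_multicast_list() reprograms the routing registers
 * from scratch.
 */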
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

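/* Read the function number from the STS register and derive the
 * per-function settings: port index, XGMAC semaphore mask, link/init
 * status bits, mailbox addresses, and the nic_ops for the device type.
 */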
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

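/* Undo ql_init_device(): flush the workqueue and release all mapped
 * regions and allocations tied to the PCI device.
 */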
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

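/* One-time PCI setup done at probe time: enable the device, map the
 * control and doorbell BARs, read the flash, and initialize the
 * adapter's locks, work items, and default ring parameters.
 */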
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

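/* Periodic heartbeat: read a hardware register so the platform's EEH
 * machinery can detect a dead PCI bus.  If the channel has gone
 * offline, the timer is not rearmed.
 */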
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

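/* Probe one NIC function: allocate a multiqueue netdev, initialize the
 * adapter, set the offload feature flags, register the netdev, and
 * start the EEH heartbeat timer.
 */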
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

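/* Entry points used by the ethtool loopback self-test. */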
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

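/* Tear down one NIC function on driver unload or device removal. */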
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

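/* Traffic may flow again.  Reopen the device if it was running before
 * the error, then reattach and restart the heartbeat timer.
 */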
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

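/* Power management: quiesce the adapter, arm wake-on-LAN, and put the
 * device into the requested low-power state.
 */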
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

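/* Shutdown reuses the suspend path to quiesce the hardware. */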
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);