/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

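/* Usage sketch (illustrative only, not called anywhere): callers bracket
 * shared-resource register access with acquire/release, as the flash
 * readers below do:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times with a
 * 100us delay between attempts, so a contended semaphore can stall a
 * caller for roughly 3ms before -ETIMEDOUT is returned.
 */
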
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the DMA mapping if we never got the semaphore. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

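/* Download sequence in ql_write_cfg(), for reference: the control block
 * is DMA-mapped, its bus address is written to ICB_L/ICB_H, and CFG is
 * written with the queue id plus an operation bit that hardware clears
 * when the download completes; ql_wait_cfg() polls for that clear (or
 * for the CFG_LE error bit).
 */
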
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

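/* Usage sketch (illustrative only): a routing slot is turned on or off
 * by pairing a mask with its slot, e.g. to accept broadcast frames:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * Passing enable == 0 clears the E-bit so the entry no longer steers
 * matching frames.
 */
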
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes.  In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

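/* Note on pairing: with a single vector (legacy INTx or plain MSI) the
 * ISR calls ql_disable_completion_interrupt(), which increments irq_cnt,
 * and each finished worker calls ql_enable_completion_interrupt(), which
 * re-enables the interrupt only when irq_cnt drops back to zero.  The
 * precharge above makes the initial enable balance out to exactly zero.
 */
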
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

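/* The checksum above folds the flash image over 16-bit words; a valid
 * image sums to zero, so any non-zero fold is simply returned as the
 * error code.
 */
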
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

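/* Caveat: the 64-bit statistic is read as two 32-bit halves (lo, then
 * hi at reg + 4) with no hardware snapshot, so a counter that carries
 * from the low word into the high word between the two reads can be
 * momentarily misread; callers should treat the value as approximate.
 */
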
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
					rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

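/* Worked example (assuming 4KB pages and lbq_buf_order == 1): the master
 * chunk is one 8KB compound page, DMA-mapped once.  With a 2KB
 * lbq_buf_size it is carved into four chunks; every chunk except the
 * last takes an extra page reference via get_page(), while the last one
 * consumes the original reference and sets last_flag so that
 * ql_get_curr_lchunk() knows when to unmap the whole block.
 */
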
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx = %d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

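/* The refill above runs in batches of 16 descriptors and rings the
 * producer-index doorbell at most once per call, only when something
 * was actually cleaned, keeping MMIO writes off the per-buffer path.
 */
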
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	napi_gro_frags(napi);
}

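/* The receive path splits by frame type: ql_process_mac_rx_gro_page()
 * above appends the page chunk to a GRO skb, ql_process_mac_rx_page()
 * below copies the Ethernet header and chains the page for larger
 * non-GRO frames, and ql_process_mac_rx_skb() copies small frames out
 * of the small-buffer queue into a fresh skb.
 */
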
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
				length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *) ((u8 *)addr + ETH_HLEN);
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

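/* Small-buffer skbs are given a QLGE_SB_PAD (32 byte) reserve before
 * they are handed to the hardware.  ql_realign_skb() shifts the data
 * back so that it starts NET_IP_ALIGN (2) bytes in, which leaves the
 * IP header 4-byte aligned once the 14-byte Ethernet header is pulled.
 */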
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

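/* The IOCB flags say where the chip placed the frame: headers in a
 * small buffer (HV/HS), data in a small buffer (DS), data in a single
 * large buffer (DL), or data in a chain of large buffers described by
 * an sg list in a small buffer.  ql_build_rx_skb() handles every
 * combination.
 */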
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstream. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

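/* TX completions arrive on an outbound completion ring, not on the TX
 * work ring itself.  The response names the tx_ring and descriptor
 * (tid) being retired, so the handler can unmap the DMA segments, free
 * the skb, and return the slot to tx_count.
 */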
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to tell the reset process
	 * that we are in fatal-error recovery rather than a
	 * normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits occurred during lookup.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

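/* Reap TX completions from an outbound completion ring.  If the
 * corresponding TX queue was stopped for lack of descriptors, it is
 * woken again once at least a quarter of the work queue is free.
 */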
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	struct tx_ring *tx_ring;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

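/* NAPI poll routine for MSI-X vectors.  Each vector owns one RSS
 * (inbound) ring and may also service one or more TX completion rings;
 * the TX completions are drained first, then the RSS ring is cleaned
 * against the budget before the vector's interrupt is re-enabled.
 */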
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure the tx flag is always in the same
	 * state as the rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
	netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

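/* VLAN filtering uses one hardware address-filter entry per VID.  The
 * __qlge_vlan_rx_add_vid()/__qlge_vlan_rx_kill_vid() helpers write the
 * entry with the enable bit set or cleared; callers must already hold
 * the MAC address semaphore (ql_sem_spinlock(SEM_MAC_ADDR_MASK)).
 */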
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

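/* Set up a TSO (LSO) request.  The chip fills in the per-segment TCP
 * checksums, so the stack's checksum is replaced with the pseudo-header
 * seed (length omitted) via csum_tcpudp_magic()/csum_ipv6_magic()
 * before the IOCB is posted.  Returns 1 if TSO was set up, 0 for
 * non-GSO skbs, or a negative errno.
 */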
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

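/* Non-TSO checksum offload (IPv4 TCP/UDP only).  As with TSO, the
 * hardware expects the transport checksum field to hold the
 * pseudo-header seed, this time with the payload length included.
 */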
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

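/* .ndo_start_xmit: the TX queue was chosen by the stack and arrives in
 * skb->queue_mapping.  One IOCB is built per skb (TSO or checksum
 * offload is folded into the same descriptor), the buffers are DMA
 * mapped by ql_map_send(), and the new producer index is posted to the
 * doorbell register to hand the work to the chip.
 */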
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

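/* Large (lbq) receive buffers are chunks carved out of a larger page:
 * as the free path below shows, only the chunk carrying last_flag owns
 * the DMA mapping for the whole ql_lbq_block_size() block, while every
 * chunk holds its own reference on the page.  So we unmap once per
 * block but put_page() once per chunk.
 */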
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

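/* The lbq/sbq control blocks shadow the hardware buffer queues: each
 * bq_desc records its index and the __le64 slot in the DMA-coherent
 * queue where its buffer address will be written for the chip to
 * consume.
 */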
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

2824static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002825 struct rx_ring *rx_ring)
2826{
2827 int i;
2828 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002829 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002830
Ron Mercer4545a3f2009-02-23 10:42:17 +00002831 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002832 for (i = 0; i < rx_ring->sbq_len; i++) {
2833 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002834 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002835 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002836 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002837 bq++;
2838 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002839}
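
/* Note on the init helpers above (an illustrative sketch): lbq_base
 * and sbq_base are DMA-coherent arrays of __le64 buffer addresses,
 * one slot per queue entry. Each bq_desc[i].addr simply points at
 * slot i; the slot itself is filled in later, when
 * ql_update_buffer_queues() maps an actual page or skb for the
 * hardware, so e.g. a 512-entry queue is just 512 bus addresses.
 */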

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
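
/* Sizing note (illustrative): cq_size, sbq_size, and lbq_size used
 * above are computed in ql_configure_rings() later in this file. The
 * completion queue is cq_len response IOCBs, while each buffer queue
 * is only an array of 8-byte (__le64) buffer addresses, so e.g. a
 * 512-entry buffer queue costs 512 * 8 = 4096 bytes of coherent
 * memory plus 512 bq_desc control blocks.
 */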

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
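
/* Shadow/indirection layout sketch (illustrative, assuming the usual
 * 4 KB DB_PAGE_SIZE): each completion queue owns RX_RING_SHADOW_SPACE
 * bytes of the coherent shadow area, laid out as one u64 producer
 * index shadow, then MAX_DB_PAGES_PER_BQ(lbq_len) u64 page pointers
 * for the large buffer queue, then the same for the small buffer
 * queue. A 512-entry buffer queue is 512 * 8 = 4096 bytes, i.e.
 * exactly one doorbell page, so its indirection list is one entry.
 */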

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}
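
/* Doorbell note (illustrative, assuming the usual 4 KB DB_PAGE_SIZE):
 * each work/completion queue owns one page of the mapped doorbell
 * BAR. TX work queue N's page sits at offset DB_PAGE_SIZE * N, while
 * completion queue N's page sits above the 128-page mark (see
 * ql_start_rx_ring() above); e.g. producer-index writes for tx_ring 2
 * land at byte offset 2 * 4096 of the doorbell area.
 */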

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors. We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
					      qdev->msi_x_entry,
					      qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}
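
/* Fallback ladder example (from the loop above): on this older
 * pci_enable_msix() API a positive return value is the number of
 * vectors actually available, so a request for 8 that returns 5 is
 * retried with intr_count = 5, until the call returns 0 (success) or
 * a negative errno, at which point we degrade to MSI and finally to
 * a legacy shared IRQ with a single vector.
 */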

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
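
/* Worked mask example (matching the comment above ql_set_tx_vect):
 * with 2 vectors and 8 TX completion rings, cq_ids 0-1 are the RSS
 * rings and cq_ids 2-9 the TX completion rings, so
 *   vector 0: irq_mask = bit 0 | bits 2-5 = 0x03d
 *   vector 1: irq_mask = bit 1 | bits 6-9 = 0x3c2
 * (illustrative values; "bit n" meaning 1 << n).
 */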

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}
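
/* Precomputed-mask rationale (a sketch): the three words built above
 * are complete interrupt-control register images, so enabling,
 * disabling, or reading the state of vector i from the hot path is a
 * single 32-bit register access using the stored word, with no
 * per-interrupt bit arithmetic at interrupt time.
 */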

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED, &qdev->flags)
					? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}
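
/* Indirection example: mask 0x3ff selects a 1024-entry table, and
 * hash_id[i] = i & (rss_ring_count - 1) stripes completion queues
 * across it; with 4 RSS rings the table reads 0,1,2,3,0,1,2,3,...
 * so hashed flows spread evenly over cq_ids 0-3. The AND acts as a
 * cheap modulo whenever rss_ring_count is a power of two, which is
 * the common CPU-count case.
 */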

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
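
/* Routing summary (a sketch of the slots programmed above): of the
 * 16 routing slots cleared by ql_clear_routing_entries(), this loads
 * entries for IP and TCP/UDP checksum errors, broadcast frames, an
 * RSS match (only when more than one inbound queue exists), and
 * finally unicast CAM hits; frames matching none of these fall
 * through to the adapter's default handling.
 */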

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which
	 * the packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set, skip the mailbox commands and
	 * just clear the bit; otherwise we are in the normal reset
	 * process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
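
/* Timeout math for the poll loop above: usecs_to_jiffies(30) rounds
 * 30 us up to at least one jiffy, and the max() keeps the window
 * from ever being zero, so the busy-wait on RST_FO_FR always gets at
 * least one full tick (e.g. 1-10 ms depending on HZ) before the
 * function declares -ETIMEDOUT.
 */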

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we have already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}
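
/* Topology example (illustrative): on a 4-CPU box where MSI-X grants
 * all 4 vectors, intr_count = rss_ring_count = tx_ring_count = 4 and
 * rx_ring_count = 8; cq_ids 0-3 are inbound RSS rings (type RX_Q,
 * with buffer queues) and cq_ids 4-7 are outbound completion rings
 * (type TX_Q, no buffer queues), one per TX work queue.
 */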

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev)) {
		return 0;
	}

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}
4199
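/* Fold the per-ring counters into the netdev stats: RX counters
 * are summed across the RSS rings, TX counters across the TX rings.
 */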
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

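/* Apply the interface's RX mode to the hardware: promiscuous and
 * all-multi transitions are written to the routing registers, and
 * the multicast list is loaded into the MAC address CAM under the
 * MAC address semaphore.
 */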
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

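/* Validate the new station address, keep a local copy of it, and
 * program it into the CAM under the MAC address semaphore.
 */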
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

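/* Worker that recovers from an ASIC error by cycling the adapter
 * down and back up under RTNL, then restoring the RX mode. If the
 * cycle fails, the device is closed.
 */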
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the PCIe function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

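/* Derive per-function constants from the chip: which of the two
 * ports this PCI function drives, its semaphore mask, link/init
 * status bits, mailbox addresses, and the nic_ops matching the
 * device ID.
 */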
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

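/* One-time PCI and software setup: enable the device, set the DMA
 * mask, map the register and doorbell BARs, read the board
 * configuration and flash, and initialize the locks, workers and
 * default ring/coalescing parameters.
 */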
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

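/* Entry points the network stack uses to drive this interface. */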
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

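/* Heartbeat timer: read a status register every five seconds so a
 * dead PCI bus is noticed (and reported through EEH) even when the
 * interface is otherwise idle.
 */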
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

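/* PCI probe: allocate a multi-queue net_device, run the shared
 * device setup, advertise offload features, register with the
 * stack, and start the EEH heartbeat timer with the link marked
 * down.
 */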
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5 * HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

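/* Thin wrappers exposing the send and RX-clean paths to the
 * ethtool loopback self-test code.
 */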
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

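/* Legacy PM suspend: detach and quiesce the interface, arm
 * wake-on-LAN, then save state and drop to the requested power
 * state. Also used by qlge_shutdown().
 */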
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);