/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/*	NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*	NETIF_MSG_TX_QUEUED | */
/*	NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/*	NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCoE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
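
/* Illustrative only (not part of the driver): a typical caller polls a
 * ready bit and fails on the matching error bit, as the flash reader
 * below does:
 *
 *	status = ql_wait_reg_rdy(qdev,
 *			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *	if (status)
 *		goto exit;
 */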

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Unwind the DMA mapping taken above before bailing out. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
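
/* Illustrative only (not part of the driver): downloading a completion
 * queue init control block would look roughly like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * where cqicb points at a DMA-able completion queue control block and
 * CFG_LCQ selects the "load completion queue" operation.
 */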

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_IP_CSUM_ERR_SLOT <<
			    RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
			    RT_IDX_IDX_SHIFT);	/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
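
/* Illustrative only (not part of the driver): to steer all broadcast
 * frames to the default queue one would enable the matching slot:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * Passing enable == 0 clears the E-bit and stops the match.
 */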

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
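
/* Illustrative only (not part of the driver): for non-MSI-X operation the
 * two calls above are expected to pair around deferred work, e.g.
 *
 *	ql_disable_completion_interrupt(qdev, intr);	// irq_cnt++
 *	...queue and run the worker...
 *	ql_enable_completion_interrupt(qdev, intr);	// irq_cnt--, enable at 0
 */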

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
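
/* Illustrative only (not part of the driver): each 64-bit MAC statistic
 * occupies two consecutive 32-bit registers, so a dump loop reads them
 * eight bytes apart (the register base here is hypothetical):
 *
 *	u64 val;
 *	u32 reg = 0x200;
 *	if (ql_read_xgmac_reg64(qdev, reg, &val) == 0)
 *		...record val, then move on to reg + 8...
 */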

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable the transmitter and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable the receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
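
/* Worked example (assuming 4K pages): with lbq_buf_order == 1 the driver
 * works with 8K "master" pages, which ql_get_next_chunk() below carves
 * into lbq_buf_size-sized receive chunks.
 */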

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
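
/* Worked example (assuming 4K pages, lbq_buf_order == 1 and a 2K
 * lbq_buf_size): each 8K master page yields four chunks.  The first three
 * take an extra page reference via get_page(); the fourth sets last_flag,
 * so ql_get_curr_lchunk() unmaps the page when that chunk is consumed.
 */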

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx=%d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
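
/* Worked example (illustrative only): with the 8-entry IOCB above, a
 * 10-fragment skb maps as skb->data plus frags 0-5 inline, the eighth
 * descriptor pointing at the OAL, and frags 6-9 landing in oal->seg[0-3],
 * with the final segment's length tagged TX_DESC_E.
 */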
1435
Ron Mercer4f848c02010-01-02 10:37:43 +00001436/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001437static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1438 struct rx_ring *rx_ring,
1439 struct ib_mac_iocb_rsp *ib_mac_rsp,
1440 u32 length,
1441 u16 vlan_id)
1442{
1443 struct sk_buff *skb;
1444 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001445 struct napi_struct *napi = &rx_ring->napi;
1446
1447 napi->dev = qdev->ndev;
1448
1449 skb = napi_get_frags(napi);
1450 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001451 netif_err(qdev, drv, qdev->ndev,
1452 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001453 rx_ring->rx_dropped++;
1454 put_page(lbq_desc->p.pg_chunk.page);
1455 return;
1456 }
1457 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001458 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1459 lbq_desc->p.pg_chunk.page,
1460 lbq_desc->p.pg_chunk.offset,
1461 length);
Ron Mercer63526712010-01-02 10:37:44 +00001462
1463 skb->len += length;
1464 skb->data_len += length;
1465 skb->truesize += length;
1466 skb_shinfo(skb)->nr_frags++;
1467
1468 rx_ring->rx_packets++;
1469 rx_ring->rx_bytes += length;
1470 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001472 if (vlan_id != 0xffff)
1473 __vlan_hwaccel_put_tag(skb, vlan_id);
1474 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001475}
1476
1477/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001478static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1479 struct rx_ring *rx_ring,
1480 struct ib_mac_iocb_rsp *ib_mac_rsp,
1481 u32 length,
1482 u16 vlan_id)
1483{
1484 struct net_device *ndev = qdev->ndev;
1485 struct sk_buff *skb = NULL;
1486 void *addr;
1487 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1488 struct napi_struct *napi = &rx_ring->napi;
1489
1490 skb = netdev_alloc_skb(ndev, length);
1491 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001492 netif_err(qdev, drv, qdev->ndev,
1493 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001494 rx_ring->rx_dropped++;
1495 put_page(lbq_desc->p.pg_chunk.page);
1496 return;
1497 }
1498
1499 addr = lbq_desc->p.pg_chunk.va;
1500 prefetch(addr);
1501
1502
1503 /* Frame error, so drop the packet. */
1504 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001505 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001506 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001507 rx_ring->rx_errors++;
1508 goto err_out;
1509 }
1510
1511 /* The max framesize filter on this chip is set higher than
1512 * MTU since FCoE uses 2k frames.
1513 */
1514 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001515 netif_err(qdev, drv, qdev->ndev,
1516 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001517 rx_ring->rx_dropped++;
1518 goto err_out;
1519 }
1520 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001521 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1522 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1523 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001524 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1525 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1526 length-ETH_HLEN);
1527 skb->len += length-ETH_HLEN;
1528 skb->data_len += length-ETH_HLEN;
1529 skb->truesize += length-ETH_HLEN;
1530
1531 rx_ring->rx_packets++;
1532 rx_ring->rx_bytes += skb->len;
1533 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001534 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001535
Michał Mirosław88230fd2011-04-18 13:31:21 +00001536 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001537 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1538 /* TCP frame. */
1539 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001540 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001542 skb->ip_summed = CHECKSUM_UNNECESSARY;
1543 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1544 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1545 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001546 struct iphdr *iph =
1547 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
Ron Mercer4f848c02010-01-02 10:37:43 +00001548 if (!(iph->frag_off &
1549 cpu_to_be16(IP_MF|IP_OFFSET))) {
1550 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001551 netif_printk(qdev, rx_status, KERN_DEBUG,
1552 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001553 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001554 }
1555 }
1556 }
1557
1558 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001559 if (vlan_id != 0xffff)
1560 __vlan_hwaccel_put_tag(skb, vlan_id);
1561 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1562 napi_gro_receive(napi, skb);
1563 else
1564 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001565 return;
1566err_out:
1567 dev_kfree_skb_any(skb);
1568 put_page(lbq_desc->p.pg_chunk.page);
1569}
1570
1571/* Process an inbound completion from an rx ring. */
1572static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1573 struct rx_ring *rx_ring,
1574 struct ib_mac_iocb_rsp *ib_mac_rsp,
1575 u32 length,
1576 u16 vlan_id)
1577{
1578 struct net_device *ndev = qdev->ndev;
1579 struct sk_buff *skb = NULL;
1580 struct sk_buff *new_skb = NULL;
1581 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1582
1583 skb = sbq_desc->p.skb;
1584 /* Allocate new_skb and copy */
1585 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1586 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001587 netif_err(qdev, probe, qdev->ndev,
1588 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001589 rx_ring->rx_dropped++;
1590 return;
1591 }
1592 skb_reserve(new_skb, NET_IP_ALIGN);
1593 memcpy(skb_put(new_skb, length), skb->data, length);
1594 skb = new_skb;
1595
1596 /* Frame error, so drop the packet. */
1597 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001598 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001599 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001600 dev_kfree_skb_any(skb);
1601 rx_ring->rx_errors++;
1602 return;
1603 }
1604
1605 /* loopback self test for ethtool */
1606 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1607 ql_check_lb_frame(qdev, skb);
1608 dev_kfree_skb_any(skb);
1609 return;
1610 }
1611
1612 /* The max framesize filter on this chip is set higher than
1613 * MTU since FCoE uses 2k frames.
1614 */
1615 if (skb->len > ndev->mtu + ETH_HLEN) {
1616 dev_kfree_skb_any(skb);
1617 rx_ring->rx_dropped++;
1618 return;
1619 }
1620
1621 prefetch(skb->data);
Ron Mercer4f848c02010-01-02 10:37:43 +00001622 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001623 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1624 "%s Multicast.\n",
1625 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1626 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1627 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1628 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1629 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1630 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001631 }
1632 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001633 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1634 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001635
1636 rx_ring->rx_packets++;
1637 rx_ring->rx_bytes += skb->len;
1638 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001639 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001640
1641 /* If rx checksum is on, and there are no
1642 * csum or frame errors.
1643 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001644 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001645 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1646 /* TCP frame. */
1647 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001648 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1649 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001650 skb->ip_summed = CHECKSUM_UNNECESSARY;
1651 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1652 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1653 /* Unfragmented ipv4 UDP frame. */
1654 struct iphdr *iph = (struct iphdr *) skb->data;
1655 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001656 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001657 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001658 netif_printk(qdev, rx_status, KERN_DEBUG,
1659 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001660 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001661 }
1662 }
1663 }
1664
1665 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001666 if (vlan_id != 0xffff)
1667 __vlan_hwaccel_put_tag(skb, vlan_id);
1668 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1669 napi_gro_receive(&rx_ring->napi, skb);
1670 else
1671 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001672}
1673
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001674static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001675{
1676 void *temp_addr = skb->data;
1677
1678 /* Undo the skb_reserve(skb,32) we did before
1679 * giving to hardware, and realign data on
1680 * a 2-byte boundary.
1681 */
1682 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1683 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1684 skb_copy_to_linear_data(skb, temp_addr,
1685 (unsigned int)len);
1686}
1687
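/* A minimal sketch (not part of this driver) of the allocation-side
 * counterpart of ql_realign_skb() above: reserving NET_IP_ALIGN bytes
 * up front leaves the IP header on a 4-byte boundary once the 14-byte
 * Ethernet header has been pulled.
 */
static struct sk_buff *example_alloc_aligned_skb(struct net_device *ndev,
						 int len)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}
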
1688/*
1689 * This function builds an skb for the given inbound
1690 * completion. It will be rewritten for readability in the near
1691 * future, but for now it works well.
1692 */
1693static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1694 struct rx_ring *rx_ring,
1695 struct ib_mac_iocb_rsp *ib_mac_rsp)
1696{
1697 struct bq_desc *lbq_desc;
1698 struct bq_desc *sbq_desc;
1699 struct sk_buff *skb = NULL;
1700 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1701 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1702
1703 /*
1704 * Handle the header buffer if present.
1705 */
1706 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1707 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001708 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1709 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001710 /*
1711 * Headers fit nicely into a small buffer.
1712 */
1713 sbq_desc = ql_get_curr_sbuf(rx_ring);
1714 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001715 dma_unmap_addr(sbq_desc, mapaddr),
1716 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001717 PCI_DMA_FROMDEVICE);
1718 skb = sbq_desc->p.skb;
1719 ql_realign_skb(skb, hdr_len);
1720 skb_put(skb, hdr_len);
1721 sbq_desc->p.skb = NULL;
1722 }
1723
1724 /*
1725 * Handle the data buffer(s).
1726 */
1727 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001728 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1729 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001730 return skb;
1731 }
1732
1733 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1734 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001735 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1736 "Headers in small, data of %d bytes in small, combine them.\n",
1737 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001738 /*
1739 * Data is less than small buffer size so it's
1740 * stuffed in a small buffer.
1741 * For this case we append the data
1742 * from the "data" small buffer to the "header" small
1743 * buffer.
1744 */
1745 sbq_desc = ql_get_curr_sbuf(rx_ring);
1746 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001747 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001748 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001749 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001750 (sbq_desc, maplen),
1751 PCI_DMA_FROMDEVICE);
1752 memcpy(skb_put(skb, length),
1753 sbq_desc->p.skb->data, length);
1754 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001755 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001756 (sbq_desc,
1757 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001758 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001759 (sbq_desc,
1760 maplen),
1761 PCI_DMA_FROMDEVICE);
1762 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001763 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1764 "%d bytes in a single small buffer.\n",
1765 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001766 sbq_desc = ql_get_curr_sbuf(rx_ring);
1767 skb = sbq_desc->p.skb;
1768 ql_realign_skb(skb, length);
1769 skb_put(skb, length);
1770 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001771 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001772 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001773 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001774 maplen),
1775 PCI_DMA_FROMDEVICE);
1776 sbq_desc->p.skb = NULL;
1777 }
1778 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1779 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001780 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1781 "Header in small, %d bytes in large. Chain large to small!\n",
1782 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001783 /*
1784 * The data is in a single large buffer. We
1785 * chain it to the header buffer's skb and let
1786 * it rip.
1787 */
Ron Mercer7c734352009-10-19 03:32:19 +00001788 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001789 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1790 "Chaining page at offset = %d, for %d bytes to skb.\n",
1791 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001792 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1793 lbq_desc->p.pg_chunk.offset,
1794 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001795 skb->len += length;
1796 skb->data_len += length;
1797 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001798 } else {
1799 /*
1800 * The headers and data are in a single large buffer. We
1801 * copy it to a new skb and let it go. This can happen with
1802 * jumbo mtu on a non-TCP/UDP frame.
1803 */
Ron Mercer7c734352009-10-19 03:32:19 +00001804 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 skb = netdev_alloc_skb(qdev->ndev, length);
1806 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001807 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1808 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001809 return NULL;
1810 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001811 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001812 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001813 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001814 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001815 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001817 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1818 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1819 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001820 skb_fill_page_desc(skb, 0,
1821 lbq_desc->p.pg_chunk.page,
1822 lbq_desc->p.pg_chunk.offset,
1823 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001824 skb->len += length;
1825 skb->data_len += length;
1826 skb->truesize += length;
1827 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001828 __pskb_pull_tail(skb,
1829 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1830 VLAN_ETH_HLEN : ETH_HLEN);
1831 }
1832 } else {
1833 /*
1834 * The data is in a chain of large buffers
1835 * pointed to by a small buffer. We loop
1836		 * through and chain them to our small header
1837 * buffer's skb.
1838 * frags: There are 18 max frags and our small
1839 * buffer will hold 32 of them. The thing is,
1840 * we'll use 3 max for our 9000 byte jumbo
1841 * frames. If the MTU goes up we could
1842 * eventually be in trouble.
1843 */
Ron Mercer7c734352009-10-19 03:32:19 +00001844 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001845 sbq_desc = ql_get_curr_sbuf(rx_ring);
1846 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001847 dma_unmap_addr(sbq_desc, mapaddr),
1848 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001849 PCI_DMA_FROMDEVICE);
1850 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1851 /*
1852			 * This is a non-TCP/UDP IP frame, so
1853 * the headers aren't split into a small
1854 * buffer. We have to use the small buffer
1855 * that contains our sg list as our skb to
1856 * send upstairs. Copy the sg list here to
1857 * a local buffer and use it to find the
1858 * pages to chain.
1859 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001860 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1861 "%d bytes of headers & data in chain of large.\n",
1862 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001863 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001864 sbq_desc->p.skb = NULL;
1865 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001866 }
1867 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001868 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1869 size = (length < rx_ring->lbq_buf_size) ? length :
1870 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001871
Joe Perchesae9540f72010-02-09 11:49:52 +00001872 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1873 "Adding page %d to skb for %d bytes.\n",
1874 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001875 skb_fill_page_desc(skb, i,
1876 lbq_desc->p.pg_chunk.page,
1877 lbq_desc->p.pg_chunk.offset,
1878 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001879 skb->len += size;
1880 skb->data_len += size;
1881 skb->truesize += size;
1882 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001883 i++;
1884 }
1885 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1886 VLAN_ETH_HLEN : ETH_HLEN);
1887 }
1888 return skb;
1889}
1890
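/* A minimal sketch (not part of this driver) of the invariant
 * ql_build_rx_skb() maintains each time it chains a page: every
 * fragment added to a non-linear skb must be reflected in len,
 * data_len and truesize, and the protocol headers must end up in the
 * linear area, hence the __pskb_pull_tail() calls above.
 */
static void example_chain_rx_page(struct sk_buff *skb, int frag_idx,
				  struct page *page, int offset, int size)
{
	skb_fill_page_desc(skb, frag_idx, page, offset, size);
	skb->len += size;	/* total bytes */
	skb->data_len += size;	/* bytes held in page frags */
	skb->truesize += size;	/* memory accounting */
}
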
1891/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001892static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001893 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001894 struct ib_mac_iocb_rsp *ib_mac_rsp,
1895 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896{
1897 struct net_device *ndev = qdev->ndev;
1898 struct sk_buff *skb = NULL;
1899
1900 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1901
1902 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1903 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001904 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001906 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001907 return;
1908 }
1909
Ron Mercera32959c2009-06-09 05:39:27 +00001910 /* Frame error, so drop the packet. */
1911 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001912 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001913 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001914 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001915 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001916 return;
1917 }
Ron Mercerec33a492009-06-09 05:39:28 +00001918
1919 /* The max framesize filter on this chip is set higher than
1920 * MTU since FCoE uses 2k frames.
1921 */
1922 if (skb->len > ndev->mtu + ETH_HLEN) {
1923 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001924 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001925 return;
1926 }
1927
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001928 /* loopback self test for ethtool */
1929 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1930 ql_check_lb_frame(qdev, skb);
1931 dev_kfree_skb_any(skb);
1932 return;
1933 }
1934
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001935 prefetch(skb->data);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001936 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1938 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1939 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1940 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1941 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1942 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1943 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001944 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001945 }
1946 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001947 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1948 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001949 }
Ron Mercerd555f592009-03-09 10:59:19 +00001950
Ron Mercerd555f592009-03-09 10:59:19 +00001951 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001952 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001953
1954 /* If rx checksum is on, and there are no
1955 * csum or frame errors.
1956 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001957 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001958 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1959 /* TCP frame. */
1960 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001961 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1962 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001963 skb->ip_summed = CHECKSUM_UNNECESSARY;
1964 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1965 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1966 /* Unfragmented ipv4 UDP frame. */
1967 struct iphdr *iph = (struct iphdr *) skb->data;
1968 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001969 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00001970 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001971 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1972 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001973 }
1974 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001975 }
Ron Mercerd555f592009-03-09 10:59:19 +00001976
Ron Mercer885ee392009-11-03 13:49:31 +00001977 rx_ring->rx_packets++;
1978 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001979 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001980 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1981 __vlan_hwaccel_put_tag(skb, vlan_id);
1982 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1983 napi_gro_receive(&rx_ring->napi, skb);
1984 else
1985 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001986}
1987
Ron Mercer4f848c02010-01-02 10:37:43 +00001988/* Process an inbound completion from an rx ring. */
1989static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1990 struct rx_ring *rx_ring,
1991 struct ib_mac_iocb_rsp *ib_mac_rsp)
1992{
1993 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1994 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1995 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1996 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1997
1998 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1999
2000 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2001 /* The data and headers are split into
2002 * separate buffers.
2003 */
2004 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2005 vlan_id);
2006 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2007 /* The data fit in a single small buffer.
2008 * Allocate a new skb, copy the data and
2009 * return the buffer to the free pool.
2010 */
2011 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2012 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002013 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2014 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2015 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2016 /* TCP packet in a page chunk that's been checksummed.
2017 * Tack it on to our GRO skb and let it go.
2018 */
2019 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2020 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002021 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2022 /* Non-TCP packet in a page chunk. Allocate an
2023		 * skb, tack the frags on, and send it up.
2024 */
2025 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2026 length, vlan_id);
2027 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002028 /* Non-TCP/UDP large frames that span multiple buffers
2029		 * can be processed correctly by the split frame logic.
2030 */
2031 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2032 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002033 }
2034
2035 return (unsigned long)length;
2036}
2037
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002038/* Process an outbound completion from an rx ring. */
2039static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2040 struct ob_mac_iocb_rsp *mac_rsp)
2041{
2042 struct tx_ring *tx_ring;
2043 struct tx_ring_desc *tx_ring_desc;
2044
2045 QL_DUMP_OB_MAC_RSP(mac_rsp);
2046 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2047 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2048 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002049 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2050 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002051 dev_kfree_skb(tx_ring_desc->skb);
2052 tx_ring_desc->skb = NULL;
2053
2054 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2055 OB_MAC_IOCB_RSP_S |
2056 OB_MAC_IOCB_RSP_L |
2057 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2058 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002059 netif_warn(qdev, tx_done, qdev->ndev,
2060 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002061 }
2062 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002063 netif_warn(qdev, tx_done, qdev->ndev,
2064 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002065 }
2066 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002067 netif_warn(qdev, tx_done, qdev->ndev,
2068 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002069 }
2070 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002071 netif_warn(qdev, tx_done, qdev->ndev,
2072 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002073 }
2074 }
2075 atomic_inc(&tx_ring->tx_count);
2076}
2077
2078/* Fire up a handler to reset the MPI processor. */
2079void ql_queue_fw_error(struct ql_adapter *qdev)
2080{
Ron Mercer6a473302009-07-02 06:06:12 +00002081 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002082 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2083}
2084
2085void ql_queue_asic_error(struct ql_adapter *qdev)
2086{
Ron Mercer6a473302009-07-02 06:06:12 +00002087 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002088 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002089 /* Clear adapter up bit to signal the recovery
2090 * process that it shouldn't kill the reset worker
2091 * thread
2092 */
2093 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002094 /* Set asic recovery bit to indicate reset process that we are
2095 * in fatal error recovery process rather than normal close
2096 */
2097 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002098 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2099}
2100
2101static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2102 struct ib_ae_iocb_rsp *ib_ae_rsp)
2103{
2104 switch (ib_ae_rsp->event) {
2105 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002106 netif_err(qdev, rx_err, qdev->ndev,
2107 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002108 ql_queue_fw_error(qdev);
2109 return;
2110
2111 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002112 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2113 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002114 ql_queue_asic_error(qdev);
2115 return;
2116
2117 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002118 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002119 ql_queue_asic_error(qdev);
2120 break;
2121
2122 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002123 netdev_err(qdev->ndev, "PCI error occurred when reading "
2124 "anonymous buffers from rx_ring %d.\n",
2125 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002126 ql_queue_asic_error(qdev);
2127 break;
2128
2129 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002130 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2131 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002132 ql_queue_asic_error(qdev);
2133 break;
2134 }
2135}
2136
2137static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2138{
2139 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002140 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002141 struct ob_mac_iocb_rsp *net_rsp = NULL;
2142 int count = 0;
2143
Ron Mercer1e213302009-03-09 10:59:21 +00002144 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002145 /* While there are entries in the completion queue. */
2146 while (prod != rx_ring->cnsmr_idx) {
2147
Joe Perchesae9540f72010-02-09 11:49:52 +00002148 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2149 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2150 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002151
2152 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2153 rmb();
2154 switch (net_rsp->opcode) {
2155
2156 case OPCODE_OB_MAC_TSO_IOCB:
2157 case OPCODE_OB_MAC_IOCB:
2158 ql_process_mac_tx_intr(qdev, net_rsp);
2159 break;
2160 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002161 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2162 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2163 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002164 }
2165 count++;
2166 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002167 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002168 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002169 if (!net_rsp)
2170 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002171 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002172 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002173 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002174 if (atomic_read(&tx_ring->queue_stopped) &&
2175 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2176 /*
2177 * The queue got stopped because the tx_ring was full.
2178 * Wake it up, because it's now at least 25% empty.
2179 */
Ron Mercer1e213302009-03-09 10:59:21 +00002180 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002181 }
2182
2183 return count;
2184}
2185
2186static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2187{
2188 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002189 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002190 struct ql_net_rsp_iocb *net_rsp;
2191 int count = 0;
2192
2193 /* While there are entries in the completion queue. */
2194 while (prod != rx_ring->cnsmr_idx) {
2195
Joe Perchesae9540f72010-02-09 11:49:52 +00002196 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2197 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2198 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199
2200 net_rsp = rx_ring->curr_entry;
2201 rmb();
2202 switch (net_rsp->opcode) {
2203 case OPCODE_IB_MAC_IOCB:
2204 ql_process_mac_rx_intr(qdev, rx_ring,
2205 (struct ib_mac_iocb_rsp *)
2206 net_rsp);
2207 break;
2208
2209 case OPCODE_IB_AE_IOCB:
2210 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2211 net_rsp);
2212 break;
2213 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002214 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2215 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2216 net_rsp->opcode);
2217 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002218 }
2219 count++;
2220 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002221 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002222 if (count == budget)
2223 break;
2224 }
2225 ql_update_buffer_queues(qdev, rx_ring);
2226 ql_write_cq_idx(rx_ring);
2227 return count;
2228}
2229
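/* A minimal sketch (hypothetical type and helpers, standing in for
 * the driver's ql_* equivalents) of the completion-queue consumer
 * pattern both clean routines above follow: the chip DMAs its
 * producer index into a host-memory shadow location, the driver
 * drains entries until its consumer index catches up or the budget
 * runs out, then posts the new consumer index back to the chip.
 */
struct example_cq {
	__le32 *prod_idx_sh_reg;	/* updated by the chip via DMA */
	u32 cnsmr_idx;			/* owned by the driver */
};

static int example_drain_cq(struct example_cq *cq, int budget,
			    void (*handle_one)(struct example_cq *cq))
{
	u32 prod = le32_to_cpu(*cq->prod_idx_sh_reg);
	int count = 0;

	while (prod != cq->cnsmr_idx && count < budget) {
		rmb();			/* read entry only after the index */
		handle_one(cq);		/* process one completion */
		cq->cnsmr_idx++;	/* wrap handling omitted */
		count++;
		prod = le32_to_cpu(*cq->prod_idx_sh_reg);
	}
	/* a real driver would write cq->cnsmr_idx to the chip here */
	return count;
}
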
2230static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2231{
2232 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2233 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002234 struct rx_ring *trx_ring;
2235 int i, work_done = 0;
2236 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002237
Joe Perchesae9540f72010-02-09 11:49:52 +00002238 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2239 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002240
Ron Mercer39aa8162009-08-27 11:02:11 +00002241 /* Service the TX rings first. They start
2242 * right after the RSS rings. */
2243 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2244 trx_ring = &qdev->rx_ring[i];
2245 /* If this TX completion ring belongs to this vector and
2246 * it's not empty then service it.
2247 */
2248 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2249 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2250 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002251 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2252 "%s: Servicing TX completion ring %d.\n",
2253 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002254 ql_clean_outbound_rx_ring(trx_ring);
2255 }
2256 }
2257
2258 /*
2259 * Now service the RSS ring if it's active.
2260 */
2261 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2262 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002263 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2264 "%s: Servicing RX completion ring %d.\n",
2265 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002266 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2267 }
2268
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002269 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002270 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002271 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2272 }
2273 return work_done;
2274}
2275
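/* A minimal sketch (generic NAPI contract, not driver code) of the
 * rule ql_napi_poll_msix() observes above: a poll routine may only
 * call napi_complete() and re-enable its interrupt when it has done
 * less work than the budget allows; otherwise it returns the work
 * done and stays on the poll list.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ...process up to 'budget' completions, counting work_done... */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable the device's completion interrupt here */
	}
	return work_done;
}
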
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002276static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002277{
2278 struct ql_adapter *qdev = netdev_priv(ndev);
2279
Jiri Pirko18c49b92011-07-21 03:24:11 +00002280 if (features & NETIF_F_HW_VLAN_RX) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002281 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002282 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002283 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002284 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2285 }
2286}
2287
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002288static netdev_features_t qlge_fix_features(struct net_device *ndev,
2289 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002290{
2291 /*
2292 * Since there is no support for separate rx/tx vlan accel
2293	 * enable/disable, make sure the tx flag is always in the same state as rx.
2294 */
2295 if (features & NETIF_F_HW_VLAN_RX)
2296 features |= NETIF_F_HW_VLAN_TX;
2297 else
2298 features &= ~NETIF_F_HW_VLAN_TX;
2299
2300 return features;
2301}
2302
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002303static int qlge_set_features(struct net_device *ndev,
2304 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002305{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002306 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002307
2308 if (changed & NETIF_F_HW_VLAN_RX)
2309 qlge_vlan_mode(ndev, features);
2310
2311 return 0;
2312}
2313
Jiri Pirko8e586132011-12-08 19:52:37 -05002314static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002315{
2316 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002317 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002318
Jiri Pirko8e586132011-12-08 19:52:37 -05002319 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2320 MAC_ADDR_TYPE_VLAN, vid);
2321 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002322 netif_err(qdev, ifup, qdev->ndev,
2323 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002324 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002325}
2326
Jiri Pirko8e586132011-12-08 19:52:37 -05002327static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328{
2329 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002330 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002331 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002332
Ron Mercercc288f52009-02-23 10:42:14 +00002333 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2334 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002335 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002336
Jiri Pirko8e586132011-12-08 19:52:37 -05002337 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002338 set_bit(vid, qdev->active_vlans);
2339
Ron Mercercc288f52009-02-23 10:42:14 +00002340 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002341
2342 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002343}
2344
Jiri Pirko8e586132011-12-08 19:52:37 -05002345static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002346{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002347 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002348 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002349
Jiri Pirko8e586132011-12-08 19:52:37 -05002350 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2351 MAC_ADDR_TYPE_VLAN, vid);
2352 if (err)
Joe Perchesae9540f72010-02-09 11:49:52 +00002353 netif_err(qdev, ifup, qdev->ndev,
2354 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002355 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002356}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002357
Jiri Pirko8e586132011-12-08 19:52:37 -05002358static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002359{
2360 struct ql_adapter *qdev = netdev_priv(ndev);
2361 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002362 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002363
2364 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2365 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002366 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002367
Jiri Pirko8e586132011-12-08 19:52:37 -05002368 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002369 clear_bit(vid, qdev->active_vlans);
2370
2371 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002372
2373 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002374}
2375
Ron Mercerc1b60092010-10-27 04:58:12 +00002376static void qlge_restore_vlan(struct ql_adapter *qdev)
2377{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002378 int status;
2379 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002380
Jiri Pirko18c49b92011-07-21 03:24:11 +00002381 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2382 if (status)
2383 return;
2384
2385 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2386 __qlge_vlan_rx_add_vid(qdev, vid);
2387
2388 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002389}
2390
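/* A minimal sketch (generic pattern; 'restore_one' is a placeholder)
 * of the bitmap replay qlge_restore_vlan() performs above: every VLAN
 * id recorded in the bitmap is re-programmed into the hardware after
 * a reset wipes the filter state.
 */
static void example_restore_vids(unsigned long *active_vlans,
				 int (*restore_one)(u16 vid))
{
	u16 vid;

	for_each_set_bit(vid, active_vlans, VLAN_N_VID)
		restore_one(vid);
}
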
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002391/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2392static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2393{
2394 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002395 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002396 return IRQ_HANDLED;
2397}
2398
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002399/* This handles a fatal error, MPI activity, and the default
2400 * rx_ring in an MSI-X multiple vector environment.
2401 * In MSI/Legacy environment it also process the rest of
2402 * the rx_rings.
2403 */
2404static irqreturn_t qlge_isr(int irq, void *dev_id)
2405{
2406 struct rx_ring *rx_ring = dev_id;
2407 struct ql_adapter *qdev = rx_ring->qdev;
2408 struct intr_context *intr_context = &qdev->intr_context[0];
2409 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002410 int work_done = 0;
2411
Ron Mercerbb0d2152008-10-20 10:30:26 -07002412 spin_lock(&qdev->hw_lock);
2413 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002414 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2415 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002416 spin_unlock(&qdev->hw_lock);
2417 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002418 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002419 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002420
Ron Mercerbb0d2152008-10-20 10:30:26 -07002421 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002422
2423 /*
2424 * Check for fatal error.
2425 */
2426 if (var & STS_FE) {
2427 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002428 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002429 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002430 netdev_err(qdev->ndev, "Resetting chip. "
2431 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002432 return IRQ_HANDLED;
2433 }
2434
2435 /*
2436 * Check MPI processor activity.
2437 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002438 if ((var & STS_PI) &&
2439 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002440 /*
2441 * We've got an async event or mailbox completion.
2442 * Handle it and clear the source of the interrupt.
2443 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002444 netif_err(qdev, intr, qdev->ndev,
2445 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002446 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002447 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2448 queue_delayed_work_on(smp_processor_id(),
2449 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002450 work_done++;
2451 }
2452
2453 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002454 * Get the bit-mask that shows the active queues for this
2455 * pass. Compare it to the queues that this irq services
2456 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002457 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002458 var = ql_read32(qdev, ISR1);
2459 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002460 netif_info(qdev, intr, qdev->ndev,
2461 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002462 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002463 napi_schedule(&rx_ring->napi);
2464 work_done++;
2465 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002466 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002467 return work_done ? IRQ_HANDLED : IRQ_NONE;
2468}
2469
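/* A minimal sketch (generic shared-interrupt contract, not driver
 * code) of the rule qlge_isr() observes above: a handler on a shared
 * line must return IRQ_NONE when its device did not raise the
 * interrupt, so the kernel can try the other handlers on the line and
 * detect runaway interrupts.
 */
static irqreturn_t example_shared_isr(int irq, void *dev_id)
{
	int ours = 0;	/* placeholder for a device status read */

	if (!ours)
		return IRQ_NONE;
	/* ...service and acknowledge the device... */
	return IRQ_HANDLED;
}
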
2470static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2471{
2472
2473 if (skb_is_gso(skb)) {
2474 int err;
2475 if (skb_header_cloned(skb)) {
2476 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2477 if (err)
2478 return err;
2479 }
2480
2481 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2482 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2483 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2484 mac_iocb_ptr->total_hdrs_len =
2485 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2486 mac_iocb_ptr->net_trans_offset =
2487 cpu_to_le16(skb_network_offset(skb) |
2488 skb_transport_offset(skb)
2489 << OB_MAC_TRANSPORT_HDR_SHIFT);
2490 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2491 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2492 if (likely(skb->protocol == htons(ETH_P_IP))) {
2493 struct iphdr *iph = ip_hdr(skb);
2494 iph->check = 0;
2495 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2496 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2497 iph->daddr, 0,
2498 IPPROTO_TCP,
2499 0);
2500 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2501 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2502 tcp_hdr(skb)->check =
2503 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2504 &ipv6_hdr(skb)->daddr,
2505 0, IPPROTO_TCP, 0);
2506 }
2507 return 1;
2508 }
2509 return 0;
2510}
2511
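/* A minimal sketch (not part of this driver) of what the
 * ~csum_tcpudp_magic() calls in ql_tso() compute: for TSO the TCP
 * checksum field is seeded with the one's-complement pseudo-header
 * sum over the addresses and protocol, with the length left at zero
 * because the hardware fills it in per segment.
 */
static void example_seed_tso_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->check = 0;	/* hardware recomputes the IP checksum */
	tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
}
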
2512static void ql_hw_csum_setup(struct sk_buff *skb,
2513 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2514{
2515 int len;
2516 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002517 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002518 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2519 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2520 mac_iocb_ptr->net_trans_offset =
2521 cpu_to_le16(skb_network_offset(skb) |
2522 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2523
2524 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2525 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2526 if (likely(iph->protocol == IPPROTO_TCP)) {
2527 check = &(tcp_hdr(skb)->check);
2528 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2529 mac_iocb_ptr->total_hdrs_len =
2530 cpu_to_le16(skb_transport_offset(skb) +
2531 (tcp_hdr(skb)->doff << 2));
2532 } else {
2533 check = &(udp_hdr(skb)->check);
2534 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2535 mac_iocb_ptr->total_hdrs_len =
2536 cpu_to_le16(skb_transport_offset(skb) +
2537 sizeof(struct udphdr));
2538 }
2539 *check = ~csum_tcpudp_magic(iph->saddr,
2540 iph->daddr, len, iph->protocol, 0);
2541}
2542
Stephen Hemminger613573252009-08-31 19:50:58 +00002543static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002544{
2545 struct tx_ring_desc *tx_ring_desc;
2546 struct ob_mac_iocb_req *mac_iocb_ptr;
2547 struct ql_adapter *qdev = netdev_priv(ndev);
2548 int tso;
2549 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002550 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002551
2552 tx_ring = &qdev->tx_ring[tx_ring_idx];
2553
Ron Mercer74c50b42009-03-09 10:59:27 +00002554 if (skb_padto(skb, ETH_ZLEN))
2555 return NETDEV_TX_OK;
2556
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002558 netif_info(qdev, tx_queued, qdev->ndev,
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002559 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00002560 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002561 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002562 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002563 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002564 return NETDEV_TX_BUSY;
2565 }
2566 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2567 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002568 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002569
2570 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2571 mac_iocb_ptr->tid = tx_ring_desc->index;
2572 /* We use the upper 32-bits to store the tx queue for this IO.
2573 * When we get the completion we can use it to establish the context.
2574 */
2575 mac_iocb_ptr->txq_idx = tx_ring_idx;
2576 tx_ring_desc->skb = skb;
2577
2578 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2579
Jesse Grosseab6d182010-10-20 13:56:03 +00002580 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002581 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2582 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002583 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2584 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2585 }
2586 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2587 if (tso < 0) {
2588 dev_kfree_skb_any(skb);
2589 return NETDEV_TX_OK;
2590 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2591 ql_hw_csum_setup(skb,
2592 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2593 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002594 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2595 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002596 netif_err(qdev, tx_queued, qdev->ndev,
2597 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002598 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002599 return NETDEV_TX_BUSY;
2600 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002601 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2602 tx_ring->prod_idx++;
2603 if (tx_ring->prod_idx == tx_ring->wq_len)
2604 tx_ring->prod_idx = 0;
2605 wmb();
2606
2607 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002608 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2609 "tx queued, slot %d, len %d\n",
2610 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002611
2612 atomic_dec(&tx_ring->tx_count);
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002613
2614 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2615 netif_stop_subqueue(ndev, tx_ring->wq_id);
2616 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2617 /*
2618 * The queue got stopped because the tx_ring was full.
2619 * Wake it up, because it's now at least 25% empty.
2620 */
2621 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2622 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002623 return NETDEV_TX_OK;
2624}
2625
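/* A minimal sketch (hypothetical doorbell address) of the ordering
 * rule qlge_send() follows above: descriptors are written to host
 * memory first, a write barrier orders them, and only then is the new
 * producer index posted so the chip never sees a half-written
 * request.
 */
static void example_post_producer_idx(u32 prod_idx, void __iomem *doorbell)
{
	wmb();			/* descriptors visible before doorbell */
	writel(prod_idx, doorbell);
}
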
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002626
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002627static void ql_free_shadow_space(struct ql_adapter *qdev)
2628{
2629 if (qdev->rx_ring_shadow_reg_area) {
2630 pci_free_consistent(qdev->pdev,
2631 PAGE_SIZE,
2632 qdev->rx_ring_shadow_reg_area,
2633 qdev->rx_ring_shadow_reg_dma);
2634 qdev->rx_ring_shadow_reg_area = NULL;
2635 }
2636 if (qdev->tx_ring_shadow_reg_area) {
2637 pci_free_consistent(qdev->pdev,
2638 PAGE_SIZE,
2639 qdev->tx_ring_shadow_reg_area,
2640 qdev->tx_ring_shadow_reg_dma);
2641 qdev->tx_ring_shadow_reg_area = NULL;
2642 }
2643}
2644
2645static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2646{
2647 qdev->rx_ring_shadow_reg_area =
2648 pci_alloc_consistent(qdev->pdev,
2649 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2650 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002651 netif_err(qdev, ifup, qdev->ndev,
2652 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002653 return -ENOMEM;
2654 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002655 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002656 qdev->tx_ring_shadow_reg_area =
2657 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2658 &qdev->tx_ring_shadow_reg_dma);
2659 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002660 netif_err(qdev, ifup, qdev->ndev,
2661 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002662 goto err_wqp_sh_area;
2663 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002664 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002665 return 0;
2666
2667err_wqp_sh_area:
2668 pci_free_consistent(qdev->pdev,
2669 PAGE_SIZE,
2670 qdev->rx_ring_shadow_reg_area,
2671 qdev->rx_ring_shadow_reg_dma);
2672 return -ENOMEM;
2673}
2674
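/* A minimal sketch (not part of this driver) of why the memset()
 * calls follow the allocations above: pci_alloc_consistent() does not
 * clear the buffer, so shadow areas the chip will read must be zeroed
 * before the hardware is pointed at them.
 */
static void *example_alloc_zeroed_shadow(struct pci_dev *pdev,
					 dma_addr_t *dma)
{
	void *va = pci_alloc_consistent(pdev, PAGE_SIZE, dma);

	if (va)
		memset(va, 0, PAGE_SIZE);
	return va;
}
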
2675static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2676{
2677 struct tx_ring_desc *tx_ring_desc;
2678 int i;
2679 struct ob_mac_iocb_req *mac_iocb_ptr;
2680
2681 mac_iocb_ptr = tx_ring->wq_base;
2682 tx_ring_desc = tx_ring->q;
2683 for (i = 0; i < tx_ring->wq_len; i++) {
2684 tx_ring_desc->index = i;
2685 tx_ring_desc->skb = NULL;
2686 tx_ring_desc->queue_entry = mac_iocb_ptr;
2687 mac_iocb_ptr++;
2688 tx_ring_desc++;
2689 }
2690 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2691 atomic_set(&tx_ring->queue_stopped, 0);
2692}
2693
2694static void ql_free_tx_resources(struct ql_adapter *qdev,
2695 struct tx_ring *tx_ring)
2696{
2697 if (tx_ring->wq_base) {
2698 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2699 tx_ring->wq_base, tx_ring->wq_base_dma);
2700 tx_ring->wq_base = NULL;
2701 }
2702 kfree(tx_ring->q);
2703 tx_ring->q = NULL;
2704}
2705
2706static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2707 struct tx_ring *tx_ring)
2708{
2709 tx_ring->wq_base =
2710 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2711 &tx_ring->wq_base_dma);
2712
Joe Perches8e95a202009-12-03 07:58:21 +00002713 if ((tx_ring->wq_base == NULL) ||
2714 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002715 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002716 return -ENOMEM;
2717 }
2718 tx_ring->q =
2719 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2720 if (tx_ring->q == NULL)
2721 goto err;
2722
2723 return 0;
2724err:
2725 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2726 tx_ring->wq_base, tx_ring->wq_base_dma);
2727 return -ENOMEM;
2728}
2729
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002730static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002731{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002732 struct bq_desc *lbq_desc;
2733
Ron Mercer7c734352009-10-19 03:32:19 +00002734 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002735
Ron Mercer7c734352009-10-19 03:32:19 +00002736 curr_idx = rx_ring->lbq_curr_idx;
2737 clean_idx = rx_ring->lbq_clean_idx;
2738 while (curr_idx != clean_idx) {
2739 lbq_desc = &rx_ring->lbq[curr_idx];
2740
2741 if (lbq_desc->p.pg_chunk.last_flag) {
2742 pci_unmap_page(qdev->pdev,
2743 lbq_desc->p.pg_chunk.map,
2744 ql_lbq_block_size(qdev),
2745 PCI_DMA_FROMDEVICE);
2746 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002747 }
Ron Mercer7c734352009-10-19 03:32:19 +00002748
2749 put_page(lbq_desc->p.pg_chunk.page);
2750 lbq_desc->p.pg_chunk.page = NULL;
2751
2752 if (++curr_idx == rx_ring->lbq_len)
2753 curr_idx = 0;
2754
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002755 }
2756}
2757
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002758static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002759{
2760 int i;
2761 struct bq_desc *sbq_desc;
2762
2763 for (i = 0; i < rx_ring->sbq_len; i++) {
2764 sbq_desc = &rx_ring->sbq[i];
2765 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002766 netif_err(qdev, ifup, qdev->ndev,
2767 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002768 return;
2769 }
2770 if (sbq_desc->p.skb) {
2771 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002772 dma_unmap_addr(sbq_desc, mapaddr),
2773 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002774 PCI_DMA_FROMDEVICE);
2775 dev_kfree_skb(sbq_desc->p.skb);
2776 sbq_desc->p.skb = NULL;
2777 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002778 }
2779}
2780
Ron Mercer4545a3f2009-02-23 10:42:17 +00002781/* Free all large and small rx buffers associated
2782 * with the completion queues for this device.
2783 */
2784static void ql_free_rx_buffers(struct ql_adapter *qdev)
2785{
2786 int i;
2787 struct rx_ring *rx_ring;
2788
2789 for (i = 0; i < qdev->rx_ring_count; i++) {
2790 rx_ring = &qdev->rx_ring[i];
2791 if (rx_ring->lbq)
2792 ql_free_lbq_buffers(qdev, rx_ring);
2793 if (rx_ring->sbq)
2794 ql_free_sbq_buffers(qdev, rx_ring);
2795 }
2796}
2797
2798static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2799{
2800 struct rx_ring *rx_ring;
2801 int i;
2802
2803 for (i = 0; i < qdev->rx_ring_count; i++) {
2804 rx_ring = &qdev->rx_ring[i];
2805 if (rx_ring->type != TX_Q)
2806 ql_update_buffer_queues(qdev, rx_ring);
2807 }
2808}
2809
2810static void ql_init_lbq_ring(struct ql_adapter *qdev,
2811 struct rx_ring *rx_ring)
2812{
2813 int i;
2814 struct bq_desc *lbq_desc;
2815 __le64 *bq = rx_ring->lbq_base;
2816
2817 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2818 for (i = 0; i < rx_ring->lbq_len; i++) {
2819 lbq_desc = &rx_ring->lbq[i];
2820 memset(lbq_desc, 0, sizeof(*lbq_desc));
2821 lbq_desc->index = i;
2822 lbq_desc->addr = bq;
2823 bq++;
2824 }
2825}
2826
2827static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002828 struct rx_ring *rx_ring)
2829{
2830 int i;
2831 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002832 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002833
Ron Mercer4545a3f2009-02-23 10:42:17 +00002834 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002835 for (i = 0; i < rx_ring->sbq_len; i++) {
2836 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002837 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002838 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002839 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002840 bq++;
2841 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002842}
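/*
 * Layout sketch for the two init loops above (illustrative only):
 * entry i of a buffer queue is a little-endian 64-bit DMA address
 * stored at (queue base + i * sizeof(__le64)), and bq_desc[i].addr
 * points at that slot so a refill can write the new buffer address
 * in place.
 */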
2843
2844static void ql_free_rx_resources(struct ql_adapter *qdev,
2845 struct rx_ring *rx_ring)
2846{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002847 /* Free the small buffer queue. */
2848 if (rx_ring->sbq_base) {
2849 pci_free_consistent(qdev->pdev,
2850 rx_ring->sbq_size,
2851 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2852 rx_ring->sbq_base = NULL;
2853 }
2854
2855 /* Free the small buffer queue control blocks. */
2856 kfree(rx_ring->sbq);
2857 rx_ring->sbq = NULL;
2858
2859 /* Free the large buffer queue. */
2860 if (rx_ring->lbq_base) {
2861 pci_free_consistent(qdev->pdev,
2862 rx_ring->lbq_size,
2863 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2864 rx_ring->lbq_base = NULL;
2865 }
2866
2867 /* Free the large buffer queue control blocks. */
2868 kfree(rx_ring->lbq);
2869 rx_ring->lbq = NULL;
2870
2871 /* Free the rx queue. */
2872 if (rx_ring->cq_base) {
2873 pci_free_consistent(qdev->pdev,
2874 rx_ring->cq_size,
2875 rx_ring->cq_base, rx_ring->cq_base_dma);
2876 rx_ring->cq_base = NULL;
2877 }
2878}
2879
2880/* Allocate queues and buffers for this completion queue based
2881 * on the values in the parameter structure. */
2882static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2883 struct rx_ring *rx_ring)
2884{
2885
2886 /*
2887 * Allocate the completion queue for this rx_ring.
2888 */
2889 rx_ring->cq_base =
2890 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2891 &rx_ring->cq_base_dma);
2892
2893 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002894 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002895 return -ENOMEM;
2896 }
2897
2898 if (rx_ring->sbq_len) {
2899 /*
2900 * Allocate small buffer queue.
2901 */
2902 rx_ring->sbq_base =
2903 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2904 &rx_ring->sbq_base_dma);
2905
2906 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002907 netif_err(qdev, ifup, qdev->ndev,
2908 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002909 goto err_mem;
2910 }
2911
2912 /*
2913 * Allocate small buffer queue control blocks.
2914 */
2915 rx_ring->sbq =
2916 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2917 GFP_KERNEL);
2918 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002919 netif_err(qdev, ifup, qdev->ndev,
2920 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002921 goto err_mem;
2922 }
2923
Ron Mercer4545a3f2009-02-23 10:42:17 +00002924 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002925 }
2926
2927 if (rx_ring->lbq_len) {
2928 /*
2929 * Allocate large buffer queue.
2930 */
2931 rx_ring->lbq_base =
2932 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2933 &rx_ring->lbq_base_dma);
2934
2935 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002936 netif_err(qdev, ifup, qdev->ndev,
2937 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002938 goto err_mem;
2939 }
2940 /*
2941 * Allocate large buffer queue control blocks.
2942 */
2943 rx_ring->lbq =
2944 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2945 GFP_KERNEL);
2946 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002947 netif_err(qdev, ifup, qdev->ndev,
2948 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002949 goto err_mem;
2950 }
2951
Ron Mercer4545a3f2009-02-23 10:42:17 +00002952 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002953 }
2954
2955 return 0;
2956
2957err_mem:
2958 ql_free_rx_resources(qdev, rx_ring);
2959 return -ENOMEM;
2960}
2961
2962static void ql_tx_ring_clean(struct ql_adapter *qdev)
2963{
2964 struct tx_ring *tx_ring;
2965 struct tx_ring_desc *tx_ring_desc;
2966 int i, j;
2967
2968 /*
2969 * Loop through all queues and free
2970 * any resources.
2971 */
2972 for (j = 0; j < qdev->tx_ring_count; j++) {
2973 tx_ring = &qdev->tx_ring[j];
2974 for (i = 0; i < tx_ring->wq_len; i++) {
2975 tx_ring_desc = &tx_ring->q[i];
2976 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002977 netif_err(qdev, ifdown, qdev->ndev,
2978 "Freeing lost SKB %p, from queue %d, index %d.\n",
2979 tx_ring_desc->skb, j,
2980 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002981 ql_unmap_send(qdev, tx_ring_desc,
2982 tx_ring_desc->map_cnt);
2983 dev_kfree_skb(tx_ring_desc->skb);
2984 tx_ring_desc->skb = NULL;
2985 }
2986 }
2987 }
2988}
2989
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002990static void ql_free_mem_resources(struct ql_adapter *qdev)
2991{
2992 int i;
2993
2994 for (i = 0; i < qdev->tx_ring_count; i++)
2995 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2996 for (i = 0; i < qdev->rx_ring_count; i++)
2997 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2998 ql_free_shadow_space(qdev);
2999}
3000
3001static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3002{
3003 int i;
3004
3005 /* Allocate space for our shadow registers and such. */
3006 if (ql_alloc_shadow_space(qdev))
3007 return -ENOMEM;
3008
3009 for (i = 0; i < qdev->rx_ring_count; i++) {
3010 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003011 netif_err(qdev, ifup, qdev->ndev,
3012 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003013 goto err_mem;
3014 }
3015 }
3016 /* Allocate tx queue resources */
3017 for (i = 0; i < qdev->tx_ring_count; i++) {
3018 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003019 netif_err(qdev, ifup, qdev->ndev,
3020 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003021 goto err_mem;
3022 }
3023 }
3024 return 0;
3025
3026err_mem:
3027 ql_free_mem_resources(qdev);
3028 return -ENOMEM;
3029}
3030
3031/* Set up the rx ring control block and pass it to the chip.
3032 * The control block is defined as
3033 * "Completion Queue Initialization Control Block", or cqicb.
3034 */
3035static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3036{
3037 struct cqicb *cqicb = &rx_ring->cqicb;
3038 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003039 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003040 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003041 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003042 void __iomem *doorbell_area =
3043 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3044 int err = 0;
3045 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003046 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003047 __le64 *base_indirect_ptr;
3048 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003049
3050 /* Set up the shadow registers for this ring. */
3051 rx_ring->prod_idx_sh_reg = shadow_reg;
3052 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003053 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003054 shadow_reg += sizeof(u64);
3055 shadow_reg_dma += sizeof(u64);
3056 rx_ring->lbq_base_indirect = shadow_reg;
3057 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003058 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3059 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003060 rx_ring->sbq_base_indirect = shadow_reg;
3061 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3062
3063 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003064 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003065 rx_ring->cnsmr_idx = 0;
3066 rx_ring->curr_entry = rx_ring->cq_base;
3067
3068 /* PCI doorbell mem area + 0x04 for valid register */
3069 rx_ring->valid_db_reg = doorbell_area + 0x04;
3070
3071 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003072 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003073
3074 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003075 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076
3077 memset((void *)cqicb, 0, sizeof(struct cqicb));
3078 cqicb->msix_vect = rx_ring->irq;
3079
Ron Mercer459caf52009-01-04 17:08:11 -08003080 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3081 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003082
Ron Mercer97345522009-01-09 11:31:50 +00003083 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003084
Ron Mercer97345522009-01-09 11:31:50 +00003085 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003086
3087 /*
3088 * Set up the control block load flags.
3089 */
3090 cqicb->flags = FLAGS_LC | /* Load queue base address */
3091 FLAGS_LV | /* Load MSI-X vector */
3092 FLAGS_LI; /* Load irq delay values */
3093 if (rx_ring->lbq_len) {
3094 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003095 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003096 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003097 page_entries = 0;
3098 do {
3099 *base_indirect_ptr = cpu_to_le64(tmp);
3100 tmp += DB_PAGE_SIZE;
3101 base_indirect_ptr++;
3102 page_entries++;
3103 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003104 cqicb->lbq_addr =
3105 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003106 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3107 (u16) rx_ring->lbq_buf_size;
3108 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3109 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3110 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003111 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003112 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003113 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003114 rx_ring->lbq_clean_idx = 0;
3115 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003116 }
3117 if (rx_ring->sbq_len) {
3118 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003119 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003120 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003121 page_entries = 0;
3122 do {
3123 *base_indirect_ptr = cpu_to_le64(tmp);
3124 tmp += DB_PAGE_SIZE;
3125 base_indirect_ptr++;
3126 page_entries++;
3127 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003128 cqicb->sbq_addr =
3129 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003130 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003131 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003132 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3133 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003134 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003135 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003136 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003137 rx_ring->sbq_clean_idx = 0;
3138 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003139 }
3140 switch (rx_ring->type) {
3141 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3143 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3144 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003145 case RX_Q:
3146 /* Inbound completion handling rx_rings run in
3147 * separate NAPI contexts.
3148 */
3149 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3150 64);
3151 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3152 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3153 break;
3154 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003155 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3156 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003157 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003158 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3159 CFG_LCQ, rx_ring->cq_id);
3160 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003161 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003162 return err;
3163 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003164 return err;
3165}
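/*
 * Illustrative sketch (not called by the driver) of the indirection
 * math in ql_start_rx_ring() above: each shadow-table entry names one
 * DB_PAGE_SIZE chunk of a buffer queue, so a queue of q_len 8-byte
 * entries needs the page count below. This assumes MAX_DB_PAGES_PER_BQ()
 * computes the same quantity; the helper name is hypothetical.
 */
static inline int qlge_example_db_pages_for_bq(int q_len)
{
	return DIV_ROUND_UP(q_len * sizeof(u64), DB_PAGE_SIZE);
}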
3166
3167static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3168{
3169 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3170 void __iomem *doorbell_area =
3171 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3172 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3173 (tx_ring->wq_id * sizeof(u64));
3174 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3175 (tx_ring->wq_id * sizeof(u64));
3176 int err = 0;
3177
3178 /*
3179 * Assign doorbell registers for this tx_ring.
3180 */
3181 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003182 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003183 tx_ring->prod_idx = 0;
3184 /* TX PCI doorbell mem area + 0x04 */
3185 tx_ring->valid_db_reg = doorbell_area + 0x04;
3186
3187 /*
3188 * Assign shadow registers for this tx_ring.
3189 */
3190 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3191 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3192
3193 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3194 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3195 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3196 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3197 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003198 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003199
Ron Mercer97345522009-01-09 11:31:50 +00003200 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003201
3202 ql_init_tx_ring(qdev, tx_ring);
3203
Ron Mercere3324712009-07-02 06:06:13 +00003204 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003205 (u16) tx_ring->wq_id);
3206 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003207 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003208 return err;
3209 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003210 return err;
3211}
3212
3213static void ql_disable_msix(struct ql_adapter *qdev)
3214{
3215 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3216 pci_disable_msix(qdev->pdev);
3217 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3218 kfree(qdev->msi_x_entry);
3219 qdev->msi_x_entry = NULL;
3220 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3221 pci_disable_msi(qdev->pdev);
3222 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3223 }
3224}
3225
Ron Mercera4ab6132009-08-27 11:02:10 +00003226/* We start by trying to get the number of vectors
3227 * stored in qdev->intr_count. If we don't get that
3228 * many then we reduce the count and try again.
3229 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230static void ql_enable_msix(struct ql_adapter *qdev)
3231{
Ron Mercera4ab6132009-08-27 11:02:10 +00003232 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003233
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003234 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003235 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003236 /* Try to alloc space for the msix struct,
3237 * if it fails then go to MSI/legacy.
3238 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003239 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003240 sizeof(struct msix_entry),
3241 GFP_KERNEL);
3242 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003243 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003244 goto msi;
3245 }
3246
Ron Mercera4ab6132009-08-27 11:02:10 +00003247 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003248 qdev->msi_x_entry[i].entry = i;
3249
Ron Mercera4ab6132009-08-27 11:02:10 +00003250 /* Loop to get our vectors. We start with
3251 * what we want and settle for what we get.
3252 */
3253 do {
3254 err = pci_enable_msix(qdev->pdev,
3255 qdev->msi_x_entry, qdev->intr_count);
3256 if (err > 0)
3257 qdev->intr_count = err;
3258 } while (err > 0);
3259
3260 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003261 kfree(qdev->msi_x_entry);
3262 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003263 netif_warn(qdev, ifup, qdev->ndev,
3264 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003265 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003266 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003267 } else if (err == 0) {
3268 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003269 netif_info(qdev, ifup, qdev->ndev,
3270 "MSI-X Enabled, got %d vectors.\n",
3271 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003272 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003273 }
3274 }
3275msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003276 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003277 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003278 if (!pci_enable_msi(qdev->pdev)) {
3279 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003280 netif_info(qdev, ifup, qdev->ndev,
3281 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003282 return;
3283 }
3284 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003285 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003286 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3287 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003288}
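/*
 * Note on the retry loop above (a sketch of the assumed contract, not
 * a statement of the current API): the old pci_enable_msix() returns 0
 * when every requested vector is granted, a positive count when only
 * that many are available (so the loop retries with the smaller count),
 * and a negative errno on hard failure, which drops the driver back to
 * MSI and then legacy INTx.
 */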
3289
Ron Mercer39aa8162009-08-27 11:02:11 +00003290/* Each vector services 1 RSS ring and 1 or more
3291 * TX completion rings. This function loops through
3292 * the TX completion rings and assigns the vector that
3293 * will service it. An example would be if there are
3294 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3295 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003296 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003297 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3298 */
3299static void ql_set_tx_vect(struct ql_adapter *qdev)
3300{
3301 int i, j, vect;
3302 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3303
3304 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3305 /* Assign irq vectors to TX rx_rings.*/
3306 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3307 i < qdev->rx_ring_count; i++) {
3308 if (j == tx_rings_per_vector) {
3309 vect++;
3310 j = 0;
3311 }
3312 qdev->rx_ring[i].irq = vect;
3313 j++;
3314 }
3315 } else {
3316		/* For a single vector all rings have an irq
3317 * of zero.
3318 */
3319 for (i = 0; i < qdev->rx_ring_count; i++)
3320 qdev->rx_ring[i].irq = 0;
3321 }
3322}
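/*
 * Sketch of the arithmetic above (not driver code): for the example in
 * the comment -- 2 vectors, 8 TX completion rings -- tx_rings_per_vector
 * is 4, so TX completion ring n (counted from 0) is serviced by vector
 * n / 4. The helper name is hypothetical.
 */
static inline int qlge_example_tx_vector(struct ql_adapter *qdev, int tx_idx)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	return tx_idx / tx_rings_per_vector;
}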
3323
3324/* Set the interrupt mask for this vector. Each vector
3325 * will service 1 RSS ring and 1 or more TX completion
3326 * rings. This function sets up a bit mask per vector
3327 * that indicates which rings it services.
3328 */
3329static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3330{
3331 int j, vect = ctx->intr;
3332 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3333
3334 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3335 /* Add the RSS ring serviced by this vector
3336 * to the mask.
3337 */
3338 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3339 /* Add the TX ring(s) serviced by this vector
3340 * to the mask. */
3341 for (j = 0; j < tx_rings_per_vector; j++) {
3342 ctx->irq_mask |=
3343 (1 << qdev->rx_ring[qdev->rss_ring_count +
3344 (vect * tx_rings_per_vector) + j].cq_id);
3345 }
3346 } else {
3347		/* For a single vector we just shift each queue's
3348 * ID into the mask.
3349 */
3350 for (j = 0; j < qdev->rx_ring_count; j++)
3351 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3352 }
3353}
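/*
 * Worked example for the mask above (illustrative): with 2 RSS rings
 * (cq_ids 0 and 1) and 8 TX completion rings (cq_ids 2..9), vector 0
 * gets irq_mask bits {0, 2, 3, 4, 5} and vector 1 gets {1, 6, 7, 8, 9},
 * matching the assignment made in ql_set_tx_vect().
 */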
3354
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003355/*
3356 * Here we build the intr_context structures based on
3357 * our rx_ring count and intr vector count.
3358 * The intr_context structure is used to hook each vector
3359 * to possibly different handlers.
3360 */
3361static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3362{
3363 int i = 0;
3364 struct intr_context *intr_context = &qdev->intr_context[0];
3365
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003366 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3367		/* Each rx_ring has its
3368 * own intr_context since we have separate
3369 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003370 */
3371 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3372 qdev->rx_ring[i].irq = i;
3373 intr_context->intr = i;
3374 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003375 /* Set up this vector's bit-mask that indicates
3376 * which queues it services.
3377 */
3378 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003379 /*
3380			 * We set up each vector's enable/disable/read bits so
3381			 * there are no bit/mask calculations in the critical path.
3382 */
3383 intr_context->intr_en_mask =
3384 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3385 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3386 | i;
3387 intr_context->intr_dis_mask =
3388 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3389 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3390 INTR_EN_IHD | i;
3391 intr_context->intr_read_mask =
3392 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3393 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3394 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003395 if (i == 0) {
3396 /* The first vector/queue handles
3397 * broadcast/multicast, fatal errors,
3398 * and firmware events. This in addition
3399 * to normal inbound NAPI processing.
3400 */
3401 intr_context->handler = qlge_isr;
3402 sprintf(intr_context->name, "%s-rx-%d",
3403 qdev->ndev->name, i);
3404 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003405 /*
3406 * Inbound queues handle unicast frames only.
3407 */
3408 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003409 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003410 qdev->ndev->name, i);
3411 }
3412 }
3413 } else {
3414 /*
3415 * All rx_rings use the same intr_context since
3416 * there is only one vector.
3417 */
3418 intr_context->intr = 0;
3419 intr_context->qdev = qdev;
3420 /*
3421		 * We set up each vector's enable/disable/read bits so
3422		 * there are no bit/mask calculations in the critical path.
3423 */
3424 intr_context->intr_en_mask =
3425 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3426 intr_context->intr_dis_mask =
3427 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3428 INTR_EN_TYPE_DISABLE;
3429 intr_context->intr_read_mask =
3430 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3431 /*
3432 * Single interrupt means one handler for all rings.
3433 */
3434 intr_context->handler = qlge_isr;
3435 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003436 /* Set up this vector's bit-mask that indicates
3437 * which queues it services. In this case there is
3438 * a single vector so it will service all RSS and
3439 * TX completion rings.
3440 */
3441 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003442 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003443 /* Tell the TX completion rings which MSIx vector
3444 * they will be using.
3445 */
3446 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003447}
3448
3449static void ql_free_irq(struct ql_adapter *qdev)
3450{
3451 int i;
3452 struct intr_context *intr_context = &qdev->intr_context[0];
3453
3454 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3455 if (intr_context->hooked) {
3456 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3457 free_irq(qdev->msi_x_entry[i].vector,
3458 &qdev->rx_ring[i]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003459 } else {
3460 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003461 }
3462 }
3463 }
3464 ql_disable_msix(qdev);
3465}
3466
3467static int ql_request_irq(struct ql_adapter *qdev)
3468{
3469 int i;
3470 int status = 0;
3471 struct pci_dev *pdev = qdev->pdev;
3472 struct intr_context *intr_context = &qdev->intr_context[0];
3473
3474 ql_resolve_queues_to_irqs(qdev);
3475
3476 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3477 atomic_set(&intr_context->irq_cnt, 0);
3478 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3479 status = request_irq(qdev->msi_x_entry[i].vector,
3480 intr_context->handler,
3481 0,
3482 intr_context->name,
3483 &qdev->rx_ring[i]);
3484 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003485 netif_err(qdev, ifup, qdev->ndev,
3486 "Failed request for MSIX interrupt %d.\n",
3487 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003488 goto err_irq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003489 }
3490 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003491 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3492 "trying msi or legacy interrupts.\n");
3493 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3494 "%s: irq = %d.\n", __func__, pdev->irq);
3495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "%s: context->name = %s.\n", __func__,
3497 intr_context->name);
3498 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3499 "%s: dev_id = 0x%p.\n", __func__,
3500 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003501 status =
3502 request_irq(pdev->irq, qlge_isr,
3503 test_bit(QL_MSI_ENABLED,
3504 &qdev->
3505 flags) ? 0 : IRQF_SHARED,
3506 intr_context->name, &qdev->rx_ring[0]);
3507 if (status)
3508 goto err_irq;
3509
Joe Perchesae9540f72010-02-09 11:49:52 +00003510 netif_err(qdev, ifup, qdev->ndev,
3511 "Hooked intr %d, queue type %s, with name %s.\n",
3512 i,
3513 qdev->rx_ring[0].type == DEFAULT_Q ?
3514 "DEFAULT_Q" :
3515 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3516 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3517 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003518 }
3519 intr_context->hooked = 1;
3520 }
3521 return status;
3522err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003523	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003524 ql_free_irq(qdev);
3525 return status;
3526}
3527
3528static int ql_start_rss(struct ql_adapter *qdev)
3529{
Joe Perches215faf92010-12-21 02:16:10 -08003530 static const u8 init_hash_seed[] = {
3531 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3532 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3533 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3534 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3535 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3536 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003537 struct ricb *ricb = &qdev->ricb;
3538 int status = 0;
3539 int i;
3540 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3541
Ron Mercere3324712009-07-02 06:06:13 +00003542 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003543
Ron Mercerb2014ff2009-08-27 11:02:09 +00003544 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003545 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003546 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3547 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003548
3549 /*
3550 * Fill out the Indirection Table.
3551 */
Ron Mercer541ae282009-10-08 09:54:37 +00003552 for (i = 0; i < 1024; i++)
3553 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003554
Ron Mercer541ae282009-10-08 09:54:37 +00003555 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3556 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003557
Ron Mercere3324712009-07-02 06:06:13 +00003558 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003560 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003561 return status;
3562 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 return status;
3564}
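/*
 * Sketch of the indirection fill above (illustrative only): the
 * 'i & (rss_ring_count - 1)' masking round-robins the 1024 table
 * entries across the RSS rings, which only distributes evenly when
 * the ring count is a power of two -- e.g. with 4 rings the table
 * repeats 0,1,2,3,0,1,2,3,...
 */
static inline void qlge_example_fill_indirection(u8 *table, int entries,
						 int rings)
{
	int i;

	/* rings is assumed to be a power of two, as in ql_start_rss() */
	for (i = 0; i < entries; i++)
		table[i] = i & (rings - 1);
}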
3565
Ron Mercera5f59dc2009-07-02 06:06:07 +00003566static int ql_clear_routing_entries(struct ql_adapter *qdev)
3567{
3568 int i, status = 0;
3569
3570 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3571 if (status)
3572 return status;
3573 /* Clear all the entries in the routing table. */
3574 for (i = 0; i < 16; i++) {
3575 status = ql_set_routing_reg(qdev, i, 0, 0);
3576 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003577 netif_err(qdev, ifup, qdev->ndev,
3578 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003579 break;
3580 }
3581 }
3582 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3583 return status;
3584}
3585
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586/* Initialize the frame-to-queue routing. */
3587static int ql_route_initialize(struct ql_adapter *qdev)
3588{
3589 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003590
3591 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003592 status = ql_clear_routing_entries(qdev);
3593 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003594 return status;
3595
3596 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597 if (status)
3598 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003599
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003600 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3601 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003602 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003603 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003604 "Failed to init routing register "
3605 "for IP CSUM error packets.\n");
3606 goto exit;
3607 }
3608 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3609 RT_IDX_TU_CSUM_ERR, 1);
3610 if (status) {
3611 netif_err(qdev, ifup, qdev->ndev,
3612 "Failed to init routing register "
3613 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003614 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003615 }
3616 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3617 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003618 netif_err(qdev, ifup, qdev->ndev,
3619 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003620 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003621 }
3622 /* If we have more than one inbound queue, then turn on RSS in the
3623 * routing block.
3624 */
3625 if (qdev->rss_ring_count > 1) {
3626 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3627 RT_IDX_RSS_MATCH, 1);
3628 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003629 netif_err(qdev, ifup, qdev->ndev,
3630 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003631 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003632 }
3633 }
3634
3635 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3636 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003637 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003638 netif_err(qdev, ifup, qdev->ndev,
3639 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003640exit:
3641 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003642 return status;
3643}
3644
Ron Mercer2ee1e272009-03-03 12:10:33 +00003645int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003646{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003647 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003648
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003649	/* Check if the link is up and use that to
3650 * determine if we are setting or clearing
3651 * the MAC address in the CAM.
3652 */
3653 set = ql_read32(qdev, STS);
3654 set &= qdev->port_link_up;
3655 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003656 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003657 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003658 return status;
3659 }
3660
3661 status = ql_route_initialize(qdev);
3662 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003663 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003664
3665 return status;
3666}
3667
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003668static int ql_adapter_initialize(struct ql_adapter *qdev)
3669{
3670 u32 value, mask;
3671 int i;
3672 int status = 0;
3673
3674 /*
3675 * Set up the System register to halt on errors.
3676 */
3677 value = SYS_EFE | SYS_FAE;
3678 mask = value << 16;
3679 ql_write32(qdev, SYS, mask | value);
3680
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003681 /* Set the default queue, and VLAN behavior. */
3682 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3683 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003684 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3685
3686 /* Set the MPI interrupt to enabled. */
3687 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3688
3689 /* Enable the function, set pagesize, enable error checking. */
3690 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003691 FSC_EC | FSC_VM_PAGE_4K;
3692 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003693
3694 /* Set/clear header splitting. */
3695 mask = FSC_VM_PAGESIZE_MASK |
3696 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3697 ql_write32(qdev, FSC, mask | value);
3698
Ron Mercer572c5262010-01-02 10:37:42 +00003699 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003700
Ron Mercera3b71932009-10-08 09:54:38 +00003701 /* Set RX packet routing to use port/pci function on which the
3702	 * packet arrived, in addition to the usual frame routing.
3703 * This is helpful on bonding where both interfaces can have
3704 * the same MAC address.
3705 */
3706 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003707 /* Reroute all packets to our Interface.
3708 * They may have been routed to MPI firmware
3709 * due to WOL.
3710 */
3711 value = ql_read32(qdev, MGMT_RCV_CFG);
3712 value &= ~MGMT_RCV_CFG_RM;
3713 mask = 0xffff0000;
3714
3715 /* Sticky reg needs clearing due to WOL. */
3716 ql_write32(qdev, MGMT_RCV_CFG, mask);
3717 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3718
3719	/* Default WOL is enabled on Mezz cards */
3720 if (qdev->pdev->subsystem_device == 0x0068 ||
3721 qdev->pdev->subsystem_device == 0x0180)
3722 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003723
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003724 /* Start up the rx queues. */
3725 for (i = 0; i < qdev->rx_ring_count; i++) {
3726 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3727 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003728 netif_err(qdev, ifup, qdev->ndev,
3729 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003730 return status;
3731 }
3732 }
3733
3734 /* If there is more than one inbound completion queue
3735 * then download a RICB to configure RSS.
3736 */
3737 if (qdev->rss_ring_count > 1) {
3738 status = ql_start_rss(qdev);
3739 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003740 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003741 return status;
3742 }
3743 }
3744
3745 /* Start up the tx queues. */
3746 for (i = 0; i < qdev->tx_ring_count; i++) {
3747 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3748 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003749 netif_err(qdev, ifup, qdev->ndev,
3750 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003751 return status;
3752 }
3753 }
3754
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003755 /* Initialize the port and set the max framesize. */
3756 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003757 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003758 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003759
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003760 /* Set up the MAC address and frame routing filter. */
3761 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003762 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003763 netif_err(qdev, ifup, qdev->ndev,
3764 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003765 return status;
3766 }
3767
3768 /* Start NAPI for the RSS queues. */
Jitendra Kalsaria19257f52012-02-03 14:06:50 +00003769 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003770 napi_enable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003771
3772 return status;
3773}
3774
3775/* Issue soft reset to chip. */
3776static int ql_adapter_reset(struct ql_adapter *qdev)
3777{
3778 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003779 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003780 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003781
Ron Mercera5f59dc2009-07-02 06:06:07 +00003782 /* Clear all the entries in the routing table. */
3783 status = ql_clear_routing_entries(qdev);
3784 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003785 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003786 return status;
3787 }
3788
3789 end_jiffies = jiffies +
3790 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003791
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003792 /* Check if bit is set then skip the mailbox command and
3793 * clear the bit, else we are in normal reset process.
3794 */
3795 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3796 /* Stop management traffic. */
3797 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003798
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003799 /* Wait for the NIC and MGMNT FIFOs to empty. */
3800 ql_wait_fifo_empty(qdev);
3801 } else
3802 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003803
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003804 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003805
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003806 do {
3807 value = ql_read32(qdev, RST_FO);
3808 if ((value & RST_FO_FR) == 0)
3809 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003810 cpu_relax();
3811 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003812
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003814 netif_err(qdev, ifdown, qdev->ndev,
3815 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003816 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003817 }
3818
Ron Mercer84087f42009-10-08 09:54:41 +00003819 /* Resume management traffic. */
3820 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003821 return status;
3822}
3823
3824static void ql_display_dev_info(struct net_device *ndev)
3825{
Joe Perchesb16fed02010-11-15 11:12:28 +00003826 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003827
Joe Perchesae9540f72010-02-09 11:49:52 +00003828 netif_info(qdev, probe, qdev->ndev,
3829 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3830 "XG Roll = %d, XG Rev = %d.\n",
3831 qdev->func,
3832 qdev->port,
3833 qdev->chip_rev_id & 0x0000000f,
3834 qdev->chip_rev_id >> 4 & 0x0000000f,
3835 qdev->chip_rev_id >> 8 & 0x0000000f,
3836 qdev->chip_rev_id >> 12 & 0x0000000f);
3837 netif_info(qdev, probe, qdev->ndev,
3838 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003839}
3840
stephen hemmingerac409212010-10-21 07:50:54 +00003841static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003842{
3843 int status = 0;
3844 u32 wol = MB_WOL_DISABLE;
3845
3846 /* The CAM is still intact after a reset, but if we
3847 * are doing WOL, then we may need to program the
3848 * routing regs. We would also need to issue the mailbox
3849 * commands to instruct the MPI what to do per the ethtool
3850 * settings.
3851 */
3852
3853 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3854 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003855 netif_err(qdev, ifdown, qdev->ndev,
Masanari Iidafd9071e2012-04-13 04:33:20 +00003856 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00003857 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003858 return -EINVAL;
3859 }
3860
3861 if (qdev->wol & WAKE_MAGIC) {
3862 status = ql_mb_wol_set_magic(qdev, 1);
3863 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003864 netif_err(qdev, ifdown, qdev->ndev,
3865 "Failed to set magic packet on %s.\n",
3866 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003867 return status;
3868 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003869 netif_info(qdev, drv, qdev->ndev,
3870 "Enabled magic packet successfully on %s.\n",
3871 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003872
3873 wol |= MB_WOL_MAGIC_PKT;
3874 }
3875
3876 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003877 wol |= MB_WOL_MODE_ON;
3878 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003879 netif_err(qdev, drv, qdev->ndev,
3880 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003881 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003882 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003883 }
3884
3885 return status;
3886}
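/*
 * Usage note (illustrative): only magic-packet wake survives the checks
 * above, so the matching userspace configuration is
 * "ethtool -s ethN wol g"; requesting any other wake flag makes this
 * function return -EINVAL.
 */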
3887
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003888static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003889{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003890
Ron Mercer6497b602009-02-12 16:37:13 -08003891 /* Don't kill the reset worker thread if we
3892 * are in the process of recovery.
3893 */
3894 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3895 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003896 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3897 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003898 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003899 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003900 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003901}
3902
3903static int ql_adapter_down(struct ql_adapter *qdev)
3904{
3905 int i, status = 0;
3906
3907 ql_link_off(qdev);
3908
3909 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003910
Ron Mercer39aa8162009-08-27 11:02:11 +00003911 for (i = 0; i < qdev->rss_ring_count; i++)
3912 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003913
3914 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3915
3916 ql_disable_interrupts(qdev);
3917
3918 ql_tx_ring_clean(qdev);
3919
Ron Mercer6b318cb2009-03-09 10:59:26 +00003920 /* Call netif_napi_del() from common point.
3921 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003922 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003923 netif_napi_del(&qdev->rx_ring[i].napi);
3924
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003925 status = ql_adapter_reset(qdev);
3926 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003927 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3928 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003929 ql_free_rx_buffers(qdev);
3930
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003931 return status;
3932}
3933
3934static int ql_adapter_up(struct ql_adapter *qdev)
3935{
3936 int err = 0;
3937
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003938 err = ql_adapter_initialize(qdev);
3939 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003940 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003941 goto err_init;
3942 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003943 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003944 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003945 /* If the port is initialized and the
3946	 * link is up then turn on the carrier.
3947 */
3948 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3949 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003950 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003951 /* Restore rx mode. */
3952 clear_bit(QL_ALLMULTI, &qdev->flags);
3953 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3954 qlge_set_multicast_list(qdev->ndev);
3955
Ron Mercerc1b60092010-10-27 04:58:12 +00003956 /* Restore vlan setting. */
3957 qlge_restore_vlan(qdev);
3958
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003959 ql_enable_interrupts(qdev);
3960 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003961 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003962
3963 return 0;
3964err_init:
3965 ql_adapter_reset(qdev);
3966 return err;
3967}
3968
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003969static void ql_release_adapter_resources(struct ql_adapter *qdev)
3970{
3971 ql_free_mem_resources(qdev);
3972 ql_free_irq(qdev);
3973}
3974
3975static int ql_get_adapter_resources(struct ql_adapter *qdev)
3976{
3977 int status = 0;
3978
3979 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003980 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003981 return -ENOMEM;
3982 }
3983 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003984 return status;
3985}
3986
3987static int qlge_close(struct net_device *ndev)
3988{
3989 struct ql_adapter *qdev = netdev_priv(ndev);
3990
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003991 /* If we hit pci_channel_io_perm_failure
3992 * failure condition, then we already
3993 * brought the adapter down.
3994 */
3995 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003996 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003997 clear_bit(QL_EEH_FATAL, &qdev->flags);
3998 return 0;
3999 }
4000
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004001 /*
4002 * Wait for device to recover from a reset.
4003 * (Rarely happens, but possible.)
4004 */
4005 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4006 msleep(1);
4007 ql_adapter_down(qdev);
4008 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004009 return 0;
4010}
4011
4012static int ql_configure_rings(struct ql_adapter *qdev)
4013{
4014 int i;
4015 struct rx_ring *rx_ring;
4016 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004017 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004018 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4019 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4020
4021 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004022
Ron Mercera4ab6132009-08-27 11:02:10 +00004023 /* In a perfect world we have one RSS ring for each CPU
4024	 * and each has its own vector. To do that we ask for
4025 * cpu_cnt vectors. ql_enable_msix() will adjust the
4026 * vector count to what we actually get. We then
4027 * allocate an RSS ring for each.
4028 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004029 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004030 qdev->intr_count = cpu_cnt;
4031 ql_enable_msix(qdev);
4032 /* Adjust the RSS ring count to the actual vector count. */
4033 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004034 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004035 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004036
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004037 for (i = 0; i < qdev->tx_ring_count; i++) {
4038 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004039 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004040 tx_ring->qdev = qdev;
4041 tx_ring->wq_id = i;
4042 tx_ring->wq_len = qdev->tx_ring_size;
4043 tx_ring->wq_size =
4044 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4045
4046 /*
4047	 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004048 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004049 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004050 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004051 }
4052
4053 for (i = 0; i < qdev->rx_ring_count; i++) {
4054 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004055 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004056 rx_ring->qdev = qdev;
4057 rx_ring->cq_id = i;
4058 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004059 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004060 /*
4061 * Inbound (RSS) queues.
4062 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004063 rx_ring->cq_len = qdev->rx_ring_size;
4064 rx_ring->cq_size =
4065 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4066 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4067 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004068 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004069 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004070 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4071 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004072 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004073 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004074 rx_ring->type = RX_Q;
4075 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004076 /*
4077 * Outbound queue handles outbound completions only.
4078 */
4079 /* outbound cq is same size as tx_ring it services. */
4080 rx_ring->cq_len = qdev->tx_ring_size;
4081 rx_ring->cq_size =
4082 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4083 rx_ring->lbq_len = 0;
4084 rx_ring->lbq_size = 0;
4085 rx_ring->lbq_buf_size = 0;
4086 rx_ring->sbq_len = 0;
4087 rx_ring->sbq_size = 0;
4088 rx_ring->sbq_buf_size = 0;
4089 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004090 }
4091 }
4092 return 0;
4093}
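/*
 * Worked example for the sizing above (illustrative): on a 4-CPU box
 * that is granted 4 MSI-X vectors, rss_ring_count = 4 and
 * tx_ring_count = 4, so rx_ring_count = 8 -- rx_ring[0..3] are RSS
 * queues (cq_ids 0..3) and rx_ring[4..7] are TX completion queues
 * (cq_ids 4..7).
 */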
4094
4095static int qlge_open(struct net_device *ndev)
4096{
4097 int err = 0;
4098 struct ql_adapter *qdev = netdev_priv(ndev);
4099
Ron Mercer74e12432009-11-11 12:54:04 +00004100 err = ql_adapter_reset(qdev);
4101 if (err)
4102 return err;
4103
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004104 err = ql_configure_rings(qdev);
4105 if (err)
4106 return err;
4107
4108 err = ql_get_adapter_resources(qdev);
4109 if (err)
4110 goto error_up;
4111
4112 err = ql_adapter_up(qdev);
4113 if (err)
4114 goto error_up;
4115
4116 return err;
4117
4118error_up:
4119 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004120 return err;
4121}
4122
Ron Mercer7c734352009-10-19 03:32:19 +00004123static int ql_change_rx_buffers(struct ql_adapter *qdev)
4124{
4125 struct rx_ring *rx_ring;
4126 int i, status;
4127 u32 lbq_buf_len;
4128
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004129 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004130 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4131 int i = 3;
4132 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004133 netif_err(qdev, ifup, qdev->ndev,
4134 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004135 ssleep(1);
4136 }
4137
4138 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004139 netif_err(qdev, ifup, qdev->ndev,
4140 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004141 return -ETIMEDOUT;
4142 }
4143 }
4144
4145 status = ql_adapter_down(qdev);
4146 if (status)
4147 goto error;
4148
4149 /* Get the new rx buffer size. */
4150 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4151 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4152 qdev->lbq_buf_order = get_order(lbq_buf_len);
4153
4154 for (i = 0; i < qdev->rss_ring_count; i++) {
4155 rx_ring = &qdev->rx_ring[i];
4156 /* Set the new size. */
4157 rx_ring->lbq_buf_size = lbq_buf_len;
4158 }
4159
4160 status = ql_adapter_up(qdev);
4161 if (status)
4162 goto error;
4163
4164 return status;
4165error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004166 netif_alert(qdev, ifup, qdev->ndev,
4167 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004168 set_bit(QL_ADAPTER_UP, &qdev->flags);
4169 dev_close(qdev->ndev);
4170 return status;
4171}
4172
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");

	return status;
}

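/* Aggregate the per-ring counters into the net_device statistics. RX
 * counters are summed across the RSS rings, TX counters across the TX
 * rings; the totals overwrite ndev->stats on each call.
 */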
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

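/* Program the chip's routing registers to match the requested RX mode:
 * promiscuous and all-multicast transitions flip the corresponding
 * routing slots, and any explicit multicast list is loaded into the MAC
 * address CAM under the MAC-address semaphore.
 */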
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

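/* ndo_set_mac_address handler. Validates the new address, keeps a local
 * copy for restoring after resets, and programs it into the CAM under the
 * MAC-address semaphore.
 */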
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

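/* Worker that recovers from a queued ASIC error by cycling the adapter
 * down and back up under the RTNL lock, then clearing the cached RX mode
 * bits and replaying qlge_set_multicast_list() so the hardware filters
 * are re-programmed. If recovery fails, the device is closed.
 */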
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the PCIe function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

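/* Read the function identity from the STS register and set up the
 * per-port semaphore masks, link/init status bits, mailbox addresses,
 * and chip-specific nic_ops (8012 vs. 8000). The lower-numbered of the
 * two NIC functions is treated as port 0.
 */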
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

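/* Undo everything ql_init_device() set up for this function: the
 * workqueue, both register mappings, the optional coredump buffer, and
 * the PCI regions. The caller is responsible for pci_disable_device().
 */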
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

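/* One-time PCI-level setup for a function: enable the device, configure
 * the PCIe read request size and DMA masks, map the control and doorbell
 * BARs, read the board identity, optionally allocate the MPI coredump
 * buffer, and initialize the workqueue, its delayed work items, and the
 * default ring and coalescing parameters.
 */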
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

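/* Periodic heartbeat used to detect a dead PCI bus for EEH. The STS read
 * gives the EEH machinery a chance to trip; if the channel has gone
 * offline, log and stop, otherwise re-arm the timer for five seconds out.
 */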
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

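/* PCI probe entry point. Allocates the multiqueue net_device, runs the
 * one-time hardware setup in ql_init_device(), advertises the offload
 * feature set, registers the netdev, and starts the EEH heartbeat timer.
 */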
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

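/* Thin wrappers kept non-static, apparently so the ethtool loopback
 * self-test code can inject frames and drain the receive ring through
 * the driver's normal transmit and receive paths.
 */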
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

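/* Final stage of EEH recovery: if the interface was running before the
 * error, reopen it through the normal qlge_open() path, then re-arm the
 * heartbeat timer and reattach the device.
 */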
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

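/* Power-management suspend path, also used by qlge_shutdown(). Detaches
 * the netdev, stops the heartbeat timer, brings the adapter down if it
 * was running, arms wake-on-LAN, and puts the function into the requested
 * low-power state.
 */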
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);