/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
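/* Spin on the hardware semaphore: retry up to 30 times with a 100us
 * delay between attempts (roughly 3ms) before giving up.
 */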
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction = (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ?
			PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status = ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
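/* Note: as with other control registers on this chip (e.g. CFG and STS),
 * the INTR_EN writes below appear to carry a bit mask in the upper 16 bits,
 * so only the masked bits are changed by the write.
 */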
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
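/* Verify the signature string and the 16-bit checksum of the flash image
 * that was just read into qdev->flash.  A non-zero return means it is bad.
 */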
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status = ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
				    MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status = ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
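/* Large receive buffers are carved out of a single compound ("master")
 * page of this size.
 */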
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
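/* Hand out the next chunk of the rx ring's master page, allocating and
 * DMA-mapping a fresh page when the previous one has been used up.
 */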
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
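/* Refill both the small and large buffer queues for this rx ring. */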
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
Ron Mercer4f848c02010-01-02 10:37:43 +00001434/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001435static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1436 struct rx_ring *rx_ring,
1437 struct ib_mac_iocb_rsp *ib_mac_rsp,
1438 u32 length,
1439 u16 vlan_id)
1440{
1441 struct sk_buff *skb;
1442 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001443 struct napi_struct *napi = &rx_ring->napi;
1444
1445 napi->dev = qdev->ndev;
1446
1447 skb = napi_get_frags(napi);
1448 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001449 netif_err(qdev, drv, qdev->ndev,
1450 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001451 rx_ring->rx_dropped++;
1452 put_page(lbq_desc->p.pg_chunk.page);
1453 return;
1454 }
1455 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001456 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1457 lbq_desc->p.pg_chunk.page,
1458 lbq_desc->p.pg_chunk.offset,
1459 length);
Ron Mercer63526712010-01-02 10:37:44 +00001460
1461 skb->len += length;
1462 skb->data_len += length;
1463 skb->truesize += length;
1464 skb_shinfo(skb)->nr_frags++;
1465
1466 rx_ring->rx_packets++;
1467 rx_ring->rx_bytes += length;
1468 skb->ip_summed = CHECKSUM_UNNECESSARY;
1469 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001470 if (vlan_id != 0xffff)
1471 __vlan_hwaccel_put_tag(skb, vlan_id);
1472 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001473}
1474
1475/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001476static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1477 struct rx_ring *rx_ring,
1478 struct ib_mac_iocb_rsp *ib_mac_rsp,
1479 u32 length,
1480 u16 vlan_id)
1481{
1482 struct net_device *ndev = qdev->ndev;
1483 struct sk_buff *skb = NULL;
1484 void *addr;
1485 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1486 struct napi_struct *napi = &rx_ring->napi;
1487
1488 skb = netdev_alloc_skb(ndev, length);
1489 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001490 netif_err(qdev, drv, qdev->ndev,
1491 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001492 rx_ring->rx_dropped++;
1493 put_page(lbq_desc->p.pg_chunk.page);
1494 return;
1495 }
1496
1497 addr = lbq_desc->p.pg_chunk.va;
1498 prefetch(addr);
1499
1500
1501 /* Frame error, so drop the packet. */
1502 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001503 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001504 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001505 rx_ring->rx_errors++;
1506 goto err_out;
1507 }
1508
1509 /* The max framesize filter on this chip is set higher than
1510 * MTU since FCoE uses 2k frames.
1511 */
1512 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001513 netif_err(qdev, drv, qdev->ndev,
1514 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001515 rx_ring->rx_dropped++;
1516 goto err_out;
1517 }
1518 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001519 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1520 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1521 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001522 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
 1523			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
 1524			   length - ETH_HLEN);
 1525	skb->len += length - ETH_HLEN;
 1526	skb->data_len += length - ETH_HLEN;
 1527	skb->truesize += length - ETH_HLEN;
1528
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += skb->len;
1531 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001532 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001533
Michał Mirosław88230fd2011-04-18 13:31:21 +00001534 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001535 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1536 /* TCP frame. */
1537 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001538 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1539 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001540 skb->ip_summed = CHECKSUM_UNNECESSARY;
1541 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1542 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1543 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001544 struct iphdr *iph =
1545 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
Ron Mercer4f848c02010-01-02 10:37:43 +00001546 if (!(iph->frag_off &
1547 cpu_to_be16(IP_MF|IP_OFFSET))) {
1548 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001549 netif_printk(qdev, rx_status, KERN_DEBUG,
1550 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001551 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001552 }
1553 }
1554 }
1555
1556 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001557 if (vlan_id != 0xffff)
1558 __vlan_hwaccel_put_tag(skb, vlan_id);
1559 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1560 napi_gro_receive(napi, skb);
1561 else
1562 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001563 return;
1564err_out:
1565 dev_kfree_skb_any(skb);
1566 put_page(lbq_desc->p.pg_chunk.page);
1567}
1568
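/* The entire frame fits in a small buffer: copy it into a freshly
 * allocated skb so the small buffer can stay on the ring, validate the
 * frame, then pass it up via GRO or netif_receive_skb().
 */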
1569/* Process an inbound completion from an rx ring. */
1570static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1571 struct rx_ring *rx_ring,
1572 struct ib_mac_iocb_rsp *ib_mac_rsp,
1573 u32 length,
1574 u16 vlan_id)
1575{
1576 struct net_device *ndev = qdev->ndev;
1577 struct sk_buff *skb = NULL;
1578 struct sk_buff *new_skb = NULL;
1579 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1580
1581 skb = sbq_desc->p.skb;
1582 /* Allocate new_skb and copy */
1583 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1584 if (new_skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001585 netif_err(qdev, probe, qdev->ndev,
1586 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001587 rx_ring->rx_dropped++;
1588 return;
1589 }
1590 skb_reserve(new_skb, NET_IP_ALIGN);
1591 memcpy(skb_put(new_skb, length), skb->data, length);
1592 skb = new_skb;
1593
1594 /* Frame error, so drop the packet. */
1595 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001596 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001597 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001598 dev_kfree_skb_any(skb);
1599 rx_ring->rx_errors++;
1600 return;
1601 }
1602
1603 /* loopback self test for ethtool */
1604 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1605 ql_check_lb_frame(qdev, skb);
1606 dev_kfree_skb_any(skb);
1607 return;
1608 }
1609
1610 /* The max framesize filter on this chip is set higher than
1611 * MTU since FCoE uses 2k frames.
1612 */
1613 if (skb->len > ndev->mtu + ETH_HLEN) {
1614 dev_kfree_skb_any(skb);
1615 rx_ring->rx_dropped++;
1616 return;
1617 }
1618
1619 prefetch(skb->data);
1620 skb->dev = ndev;
1621 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001622 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1623 "%s Multicast.\n",
1624 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1625 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1626 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1627 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1628 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1629 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001630 }
1631 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001632 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1633 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001634
1635 rx_ring->rx_packets++;
1636 rx_ring->rx_bytes += skb->len;
1637 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001638 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001639
1640 /* If rx checksum is on, and there are no
1641 * csum or frame errors.
1642 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001643 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001644 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1645 /* TCP frame. */
1646 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001647 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1648 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001649 skb->ip_summed = CHECKSUM_UNNECESSARY;
1650 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1651 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1652 /* Unfragmented ipv4 UDP frame. */
1653 struct iphdr *iph = (struct iphdr *) skb->data;
1654 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001655 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001656 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001657 netif_printk(qdev, rx_status, KERN_DEBUG,
1658 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001659 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001660 }
1661 }
1662 }
1663
1664 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001665 if (vlan_id != 0xffff)
1666 __vlan_hwaccel_put_tag(skb, vlan_id);
1667 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1668 napi_gro_receive(&rx_ring->napi, skb);
1669 else
1670 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001671}
1672
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001673static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001674{
1675 void *temp_addr = skb->data;
1676
1677 /* Undo the skb_reserve(skb,32) we did before
1678 * giving to hardware, and realign data on
1679 * a 2-byte boundary.
1680 */
1681 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1682 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1683 skb_copy_to_linear_data(skb, temp_addr,
1684 (unsigned int)len);
1685}
1686
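/* ql_build_rx_skb() below handles the split-frame layouts: the header
 * may arrive in a small buffer, while the data may sit in a small
 * buffer, in a single large buffer page chunk, or in a chain of large
 * buffers described by a scatter list held in a small buffer.
 */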
1687/*
1688 * This function builds an skb for the given inbound
1689 * completion. It will be rewritten for readability in the near
 1690 * future, but for now it works well.
1691 */
1692static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1693 struct rx_ring *rx_ring,
1694 struct ib_mac_iocb_rsp *ib_mac_rsp)
1695{
1696 struct bq_desc *lbq_desc;
1697 struct bq_desc *sbq_desc;
1698 struct sk_buff *skb = NULL;
1699 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1700 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1701
1702 /*
1703 * Handle the header buffer if present.
1704 */
1705 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1706 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001707 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1708 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001709 /*
1710 * Headers fit nicely into a small buffer.
1711 */
1712 sbq_desc = ql_get_curr_sbuf(rx_ring);
1713 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001714 dma_unmap_addr(sbq_desc, mapaddr),
1715 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001716 PCI_DMA_FROMDEVICE);
1717 skb = sbq_desc->p.skb;
1718 ql_realign_skb(skb, hdr_len);
1719 skb_put(skb, hdr_len);
1720 sbq_desc->p.skb = NULL;
1721 }
1722
1723 /*
1724 * Handle the data buffer(s).
1725 */
1726 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001727 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1728 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001729 return skb;
1730 }
1731
1732 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1733 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001734 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1735 "Headers in small, data of %d bytes in small, combine them.\n",
1736 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001737 /*
1738 * Data is less than small buffer size so it's
1739 * stuffed in a small buffer.
1740 * For this case we append the data
1741 * from the "data" small buffer to the "header" small
1742 * buffer.
1743 */
1744 sbq_desc = ql_get_curr_sbuf(rx_ring);
1745 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001746 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001747 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001748 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001749 (sbq_desc, maplen),
1750 PCI_DMA_FROMDEVICE);
1751 memcpy(skb_put(skb, length),
1752 sbq_desc->p.skb->data, length);
1753 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001754 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001755 (sbq_desc,
1756 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001757 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001758 (sbq_desc,
1759 maplen),
1760 PCI_DMA_FROMDEVICE);
1761 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001762 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1763 "%d bytes in a single small buffer.\n",
1764 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001765 sbq_desc = ql_get_curr_sbuf(rx_ring);
1766 skb = sbq_desc->p.skb;
1767 ql_realign_skb(skb, length);
1768 skb_put(skb, length);
1769 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001770 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001771 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001772 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001773 maplen),
1774 PCI_DMA_FROMDEVICE);
1775 sbq_desc->p.skb = NULL;
1776 }
1777 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1778 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001779 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780 "Header in small, %d bytes in large. Chain large to small!\n",
1781 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001782 /*
1783 * The data is in a single large buffer. We
1784 * chain it to the header buffer's skb and let
1785 * it rip.
1786 */
Ron Mercer7c734352009-10-19 03:32:19 +00001787 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001788 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1789 "Chaining page at offset = %d, for %d bytes to skb.\n",
1790 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001791 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1792 lbq_desc->p.pg_chunk.offset,
1793 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001794 skb->len += length;
1795 skb->data_len += length;
1796 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001797 } else {
1798 /*
1799 * The headers and data are in a single large buffer. We
1800 * copy it to a new skb and let it go. This can happen with
1801 * jumbo mtu on a non-TCP/UDP frame.
1802 */
Ron Mercer7c734352009-10-19 03:32:19 +00001803 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001804 skb = netdev_alloc_skb(qdev->ndev, length);
1805 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001806 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1807 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001808 return NULL;
1809 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001810 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001811 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001812 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001813 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001814 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001815 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001816 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1817 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1818 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001819 skb_fill_page_desc(skb, 0,
1820 lbq_desc->p.pg_chunk.page,
1821 lbq_desc->p.pg_chunk.offset,
1822 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001823 skb->len += length;
1824 skb->data_len += length;
1825 skb->truesize += length;
1826 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001827 __pskb_pull_tail(skb,
1828 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1829 VLAN_ETH_HLEN : ETH_HLEN);
1830 }
1831 } else {
1832 /*
1833 * The data is in a chain of large buffers
1834 * pointed to by a small buffer. We loop
 1835	 * through and chain them to our small header
1836 * buffer's skb.
1837 * frags: There are 18 max frags and our small
1838 * buffer will hold 32 of them. The thing is,
1839 * we'll use 3 max for our 9000 byte jumbo
1840 * frames. If the MTU goes up we could
1841 * eventually be in trouble.
1842 */
Ron Mercer7c734352009-10-19 03:32:19 +00001843 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001844 sbq_desc = ql_get_curr_sbuf(rx_ring);
1845 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001846 dma_unmap_addr(sbq_desc, mapaddr),
1847 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001848 PCI_DMA_FROMDEVICE);
1849 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1850 /*
 1851			 * This is a non-TCP/UDP IP frame, so
1852 * the headers aren't split into a small
1853 * buffer. We have to use the small buffer
1854 * that contains our sg list as our skb to
1855 * send upstairs. Copy the sg list here to
1856 * a local buffer and use it to find the
1857 * pages to chain.
1858 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001859 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1860 "%d bytes of headers & data in chain of large.\n",
1861 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001862 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001863 sbq_desc->p.skb = NULL;
1864 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001865 }
1866 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001867 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1868 size = (length < rx_ring->lbq_buf_size) ? length :
1869 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001870
Joe Perchesae9540f72010-02-09 11:49:52 +00001871 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1872 "Adding page %d to skb for %d bytes.\n",
1873 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001874 skb_fill_page_desc(skb, i,
1875 lbq_desc->p.pg_chunk.page,
1876 lbq_desc->p.pg_chunk.offset,
1877 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001878 skb->len += size;
1879 skb->data_len += size;
1880 skb->truesize += size;
1881 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001882 i++;
1883 }
1884 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1885 VLAN_ETH_HLEN : ETH_HLEN);
1886 }
1887 return skb;
1888}
1889
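/* Split-frame receive path: build an skb from the header/data buffers
 * via ql_build_rx_skb(), drop the frame on error, oversize or loopback
 * self-test, set up checksum and VLAN state, and push it up the stack.
 */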
1890/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001891static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001892 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001893 struct ib_mac_iocb_rsp *ib_mac_rsp,
1894 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001895{
1896 struct net_device *ndev = qdev->ndev;
1897 struct sk_buff *skb = NULL;
1898
1899 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1900
1901 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1902 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001903 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1904 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001905 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001906 return;
1907 }
1908
Ron Mercera32959c2009-06-09 05:39:27 +00001909 /* Frame error, so drop the packet. */
1910 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001911 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f72010-02-09 11:49:52 +00001912 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001913 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001914 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001915 return;
1916 }
Ron Mercerec33a492009-06-09 05:39:28 +00001917
1918 /* The max framesize filter on this chip is set higher than
1919 * MTU since FCoE uses 2k frames.
1920 */
1921 if (skb->len > ndev->mtu + ETH_HLEN) {
1922 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001923 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001924 return;
1925 }
1926
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001927 /* loopback self test for ethtool */
1928 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1929 ql_check_lb_frame(qdev, skb);
1930 dev_kfree_skb_any(skb);
1931 return;
1932 }
1933
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001934 prefetch(skb->data);
1935 skb->dev = ndev;
1936 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1938 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1939 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1940 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1941 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1942 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1943 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001944 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001945 }
1946 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001947 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1948 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001949 }
Ron Mercerd555f592009-03-09 10:59:19 +00001950
Ron Mercerd555f592009-03-09 10:59:19 +00001951 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001952 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001953
1954 /* If rx checksum is on, and there are no
1955 * csum or frame errors.
1956 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001957 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001958 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1959 /* TCP frame. */
1960 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001961 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1962 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001963 skb->ip_summed = CHECKSUM_UNNECESSARY;
1964 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1965 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1966 /* Unfragmented ipv4 UDP frame. */
1967 struct iphdr *iph = (struct iphdr *) skb->data;
1968 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001969 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00001970 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001971 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 1972					    "UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001973 }
1974 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001975 }
Ron Mercerd555f592009-03-09 10:59:19 +00001976
Ron Mercer885ee392009-11-03 13:49:31 +00001977 rx_ring->rx_packets++;
1978 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001979 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001980 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1981 __vlan_hwaccel_put_tag(skb, vlan_id);
1982 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1983 napi_gro_receive(&rx_ring->napi, skb);
1984 else
1985 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001986}
1987
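/* Top-level receive dispatcher: pull the frame length and VLAN tag out
 * of the IOCB, then route the completion to the split-frame, small
 * buffer, GRO page, or page handler based on how the chip placed the
 * data.
 */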
Ron Mercer4f848c02010-01-02 10:37:43 +00001988/* Process an inbound completion from an rx ring. */
1989static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1990 struct rx_ring *rx_ring,
1991 struct ib_mac_iocb_rsp *ib_mac_rsp)
1992{
1993 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1994 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1995 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1996 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1997
1998 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1999
2000 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2001 /* The data and headers are split into
2002 * separate buffers.
2003 */
2004 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2005 vlan_id);
2006 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2007 /* The data fit in a single small buffer.
2008 * Allocate a new skb, copy the data and
2009 * return the buffer to the free pool.
2010 */
2011 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2012 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002013 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2014 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2015 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2016 /* TCP packet in a page chunk that's been checksummed.
2017 * Tack it on to our GRO skb and let it go.
2018 */
2019 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2020 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002021 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2022 /* Non-TCP packet in a page chunk. Allocate an
2023 * skb, tack it on frags, and send it up.
2024 */
2025 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2026 length, vlan_id);
2027 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002028 /* Non-TCP/UDP large frames that span multiple buffers
 2029		 * can be processed correctly by the split frame logic.
2030 */
2031 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2032 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002033 }
2034
2035 return (unsigned long)length;
2036}
2037
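/* TX completions are reported on a completion (rx) ring.  The handler
 * below unmaps the DMA segments of the finished send, updates the
 * byte/packet counters, frees the skb and warns on any error flags.
 */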
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002038/* Process an outbound completion from an rx ring. */
2039static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2040 struct ob_mac_iocb_rsp *mac_rsp)
2041{
2042 struct tx_ring *tx_ring;
2043 struct tx_ring_desc *tx_ring_desc;
2044
2045 QL_DUMP_OB_MAC_RSP(mac_rsp);
2046 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2047 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2048 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002049 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2050 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002051 dev_kfree_skb(tx_ring_desc->skb);
2052 tx_ring_desc->skb = NULL;
2053
2054 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2055 OB_MAC_IOCB_RSP_S |
2056 OB_MAC_IOCB_RSP_L |
2057 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2058 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002059 netif_warn(qdev, tx_done, qdev->ndev,
2060 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002061 }
2062 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002063 netif_warn(qdev, tx_done, qdev->ndev,
2064 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002065 }
2066 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002067 netif_warn(qdev, tx_done, qdev->ndev,
2068 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002069 }
2070 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002071 netif_warn(qdev, tx_done, qdev->ndev,
2072 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002073 }
2074 }
2075 atomic_inc(&tx_ring->tx_count);
2076}
2077
2078/* Fire up a handler to reset the MPI processor. */
2079void ql_queue_fw_error(struct ql_adapter *qdev)
2080{
Ron Mercer6a473302009-07-02 06:06:12 +00002081 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002082 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2083}
2084
2085void ql_queue_asic_error(struct ql_adapter *qdev)
2086{
Ron Mercer6a473302009-07-02 06:06:12 +00002087 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002088 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002089 /* Clear adapter up bit to signal the recovery
2090 * process that it shouldn't kill the reset worker
2091 * thread
2092 */
2093 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002094 /* Set asic recovery bit to indicate reset process that we are
2095 * in fatal error recovery process rather than normal close
2096 */
2097 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002098 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2099}
2100
2101static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2102 struct ib_ae_iocb_rsp *ib_ae_rsp)
2103{
2104 switch (ib_ae_rsp->event) {
2105 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002106 netif_err(qdev, rx_err, qdev->ndev,
2107 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002108 ql_queue_fw_error(qdev);
2109 return;
2110
2111 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002112 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2113 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002114 ql_queue_asic_error(qdev);
2115 return;
2116
2117 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002118 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002119 ql_queue_asic_error(qdev);
2120 break;
2121
2122 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002123 netdev_err(qdev->ndev, "PCI error occurred when reading "
2124 "anonymous buffers from rx_ring %d.\n",
2125 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002126 ql_queue_asic_error(qdev);
2127 break;
2128
2129 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002130 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2131 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002132 ql_queue_asic_error(qdev);
2133 break;
2134 }
2135}
2136
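/* Drain TX completions from an outbound completion ring.  Once the ring
 * is serviced, wake the matching TX subqueue if it was stopped and is
 * now at least 25% empty.
 */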
2137static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2138{
2139 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002140 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002141 struct ob_mac_iocb_rsp *net_rsp = NULL;
2142 int count = 0;
2143
Ron Mercer1e213302009-03-09 10:59:21 +00002144 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002145 /* While there are entries in the completion queue. */
2146 while (prod != rx_ring->cnsmr_idx) {
2147
Joe Perchesae9540f72010-02-09 11:49:52 +00002148 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2149			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2150 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002151
2152 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2153 rmb();
2154 switch (net_rsp->opcode) {
2155
2156 case OPCODE_OB_MAC_TSO_IOCB:
2157 case OPCODE_OB_MAC_IOCB:
2158 ql_process_mac_tx_intr(qdev, net_rsp);
2159 break;
2160 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002161 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2162 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2163 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002164 }
2165 count++;
2166 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002167 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002168 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002169 if (!net_rsp)
2170 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002171 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002172 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002173 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002174 if (atomic_read(&tx_ring->queue_stopped) &&
2175 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2176 /*
2177 * The queue got stopped because the tx_ring was full.
2178 * Wake it up, because it's now at least 25% empty.
2179 */
Ron Mercer1e213302009-03-09 10:59:21 +00002180 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002181 }
2182
2183 return count;
2184}
2185
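/* Process up to 'budget' inbound completions (received frames and async
 * events) from an RSS ring, then replenish the large/small buffer
 * queues and update the consumer index.
 */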
2186static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2187{
2188 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002189 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002190 struct ql_net_rsp_iocb *net_rsp;
2191 int count = 0;
2192
2193 /* While there are entries in the completion queue. */
2194 while (prod != rx_ring->cnsmr_idx) {
2195
Joe Perchesae9540f72010-02-09 11:49:52 +00002196 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2197			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2198 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199
2200 net_rsp = rx_ring->curr_entry;
2201 rmb();
2202 switch (net_rsp->opcode) {
2203 case OPCODE_IB_MAC_IOCB:
2204 ql_process_mac_rx_intr(qdev, rx_ring,
2205 (struct ib_mac_iocb_rsp *)
2206 net_rsp);
2207 break;
2208
2209 case OPCODE_IB_AE_IOCB:
2210 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2211 net_rsp);
2212 break;
2213 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002214 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2215 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2216 net_rsp->opcode);
2217 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002218 }
2219 count++;
2220 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002221 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002222 if (count == budget)
2223 break;
2224 }
2225 ql_update_buffer_queues(qdev, rx_ring);
2226 ql_write_cq_idx(rx_ring);
2227 return count;
2228}
2229
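/* NAPI poll routine for MSI-X vectors: service any TX completion rings
 * that share this vector first, then the RSS ring itself, and re-enable
 * the interrupt once the work done falls below the budget.
 */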
2230static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2231{
2232 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2233 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002234 struct rx_ring *trx_ring;
2235 int i, work_done = 0;
2236 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002237
Joe Perchesae9540f72010-02-09 11:49:52 +00002238 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2239 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002240
Ron Mercer39aa8162009-08-27 11:02:11 +00002241 /* Service the TX rings first. They start
2242 * right after the RSS rings. */
2243 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2244 trx_ring = &qdev->rx_ring[i];
2245 /* If this TX completion ring belongs to this vector and
2246 * it's not empty then service it.
2247 */
2248 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2249 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2250 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002251 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2252 "%s: Servicing TX completion ring %d.\n",
2253 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002254 ql_clean_outbound_rx_ring(trx_ring);
2255 }
2256 }
2257
2258 /*
2259 * Now service the RSS ring if it's active.
2260 */
2261 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2262 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002263 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2264 "%s: Servicing RX completion ring %d.\n",
2265 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002266 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2267 }
2268
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002269 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002270 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002271 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2272 }
2273 return work_done;
2274}
2275
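/* Toggle hardware VLAN handling in the NIC_RCV_CFG register based on
 * the NETIF_F_HW_VLAN_RX feature flag.
 */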
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002276static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002277{
2278 struct ql_adapter *qdev = netdev_priv(ndev);
2279
Jiri Pirko18c49b92011-07-21 03:24:11 +00002280 if (features & NETIF_F_HW_VLAN_RX) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002281 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002282 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002283 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002284 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2285 }
2286}
2287
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002288static netdev_features_t qlge_fix_features(struct net_device *ndev,
2289 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002290{
2291 /*
2292 * Since there is no support for separate rx/tx vlan accel
 2293	 * enable/disable, make sure the tx flag is always in the same state as rx.
2294 */
2295 if (features & NETIF_F_HW_VLAN_RX)
2296 features |= NETIF_F_HW_VLAN_TX;
2297 else
2298 features &= ~NETIF_F_HW_VLAN_TX;
2299
2300 return features;
2301}
2302
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002303static int qlge_set_features(struct net_device *ndev,
2304 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002305{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002306 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002307
2308 if (changed & NETIF_F_HW_VLAN_RX)
2309 qlge_vlan_mode(ndev, features);
2310
2311 return 0;
2312}
2313
Jiri Pirko8e586132011-12-08 19:52:37 -05002314static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002315{
2316 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002317 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002318
Jiri Pirko8e586132011-12-08 19:52:37 -05002319 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2320 MAC_ADDR_TYPE_VLAN, vid);
2321 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002322 netif_err(qdev, ifup, qdev->ndev,
2323 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002324 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002325}
2326
Jiri Pirko8e586132011-12-08 19:52:37 -05002327static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328{
2329 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002330 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002331 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002332
Ron Mercercc288f52009-02-23 10:42:14 +00002333 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2334 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002335 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002336
Jiri Pirko8e586132011-12-08 19:52:37 -05002337 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002338 set_bit(vid, qdev->active_vlans);
2339
Ron Mercercc288f52009-02-23 10:42:14 +00002340 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002341
2342 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002343}
2344
Jiri Pirko8e586132011-12-08 19:52:37 -05002345static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002346{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002347 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002348 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002349
Jiri Pirko8e586132011-12-08 19:52:37 -05002350 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2351 MAC_ADDR_TYPE_VLAN, vid);
2352 if (err)
Joe Perchesae9540f72010-02-09 11:49:52 +00002353 netif_err(qdev, ifup, qdev->ndev,
2354 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002355 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002356}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002357
Jiri Pirko8e586132011-12-08 19:52:37 -05002358static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002359{
2360 struct ql_adapter *qdev = netdev_priv(ndev);
2361 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002362 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002363
2364 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2365 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002366 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002367
Jiri Pirko8e586132011-12-08 19:52:37 -05002368 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002369 clear_bit(vid, qdev->active_vlans);
2370
2371 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002372
2373 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002374}
2375
Ron Mercerc1b60092010-10-27 04:58:12 +00002376static void qlge_restore_vlan(struct ql_adapter *qdev)
2377{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002378 int status;
2379 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002380
Jiri Pirko18c49b92011-07-21 03:24:11 +00002381 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2382 if (status)
2383 return;
2384
2385 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2386 __qlge_vlan_rx_add_vid(qdev, vid);
2387
2388 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002389}
2390
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002391/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2392static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2393{
2394 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002395 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002396 return IRQ_HANDLED;
2397}
2398
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002399/* This handles a fatal error, MPI activity, and the default
2400 * rx_ring in an MSI-X multiple vector environment.
2401 * In MSI/Legacy environment it also process the rest of
2402 * the rx_rings.
2403 */
2404static irqreturn_t qlge_isr(int irq, void *dev_id)
2405{
2406 struct rx_ring *rx_ring = dev_id;
2407 struct ql_adapter *qdev = rx_ring->qdev;
2408 struct intr_context *intr_context = &qdev->intr_context[0];
2409 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002410 int work_done = 0;
2411
Ron Mercerbb0d2152008-10-20 10:30:26 -07002412 spin_lock(&qdev->hw_lock);
2413 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002414 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2415 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002416 spin_unlock(&qdev->hw_lock);
2417 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002418 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002419 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002420
Ron Mercerbb0d2152008-10-20 10:30:26 -07002421 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002422
2423 /*
2424 * Check for fatal error.
2425 */
2426 if (var & STS_FE) {
2427 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002428 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002429 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002430 netdev_err(qdev->ndev, "Resetting chip. "
2431 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002432 return IRQ_HANDLED;
2433 }
2434
2435 /*
2436 * Check MPI processor activity.
2437 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002438 if ((var & STS_PI) &&
2439 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002440 /*
2441 * We've got an async event or mailbox completion.
2442 * Handle it and clear the source of the interrupt.
2443 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002444 netif_err(qdev, intr, qdev->ndev,
2445 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002446 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002447 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2448 queue_delayed_work_on(smp_processor_id(),
2449 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002450 work_done++;
2451 }
2452
2453 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002454 * Get the bit-mask that shows the active queues for this
2455 * pass. Compare it to the queues that this irq services
2456 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002457 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002458 var = ql_read32(qdev, ISR1);
2459 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002460 netif_info(qdev, intr, qdev->ndev,
2461 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002462 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002463 napi_schedule(&rx_ring->napi);
2464 work_done++;
2465 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002466 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002467 return work_done ? IRQ_HANDLED : IRQ_NONE;
2468}
2469
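/* Set up a TSO IOCB for a GSO skb: record the frame length, header
 * lengths and MSS, and seed the TCP pseudo-header checksum for IPv4 or
 * IPv6 so the hardware can complete it per segment.  Returns 1 when TSO
 * was set up, 0 for non-GSO skbs, or a negative errno.
 */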
2470static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2471{
2472
2473 if (skb_is_gso(skb)) {
2474 int err;
2475 if (skb_header_cloned(skb)) {
2476 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2477 if (err)
2478 return err;
2479 }
2480
2481 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2482 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2483 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2484 mac_iocb_ptr->total_hdrs_len =
2485 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2486 mac_iocb_ptr->net_trans_offset =
2487 cpu_to_le16(skb_network_offset(skb) |
2488 skb_transport_offset(skb)
2489 << OB_MAC_TRANSPORT_HDR_SHIFT);
2490 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2491 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2492 if (likely(skb->protocol == htons(ETH_P_IP))) {
2493 struct iphdr *iph = ip_hdr(skb);
2494 iph->check = 0;
2495 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2496 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2497 iph->daddr, 0,
2498 IPPROTO_TCP,
2499 0);
2500 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2501 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2502 tcp_hdr(skb)->check =
2503 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2504 &ipv6_hdr(skb)->daddr,
2505 0, IPPROTO_TCP, 0);
2506 }
2507 return 1;
2508 }
2509 return 0;
2510}
2511
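/* Set up hardware TX checksum offload for a non-GSO IPv4 TCP or UDP
 * frame: point the IOCB at the network/transport headers and seed the
 * pseudo-header checksum that the chip will fold into the final sum.
 */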
2512static void ql_hw_csum_setup(struct sk_buff *skb,
2513 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2514{
2515 int len;
2516 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002517 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002518 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2519 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2520 mac_iocb_ptr->net_trans_offset =
2521 cpu_to_le16(skb_network_offset(skb) |
2522 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2523
2524 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2525 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2526 if (likely(iph->protocol == IPPROTO_TCP)) {
2527 check = &(tcp_hdr(skb)->check);
2528 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2529 mac_iocb_ptr->total_hdrs_len =
2530 cpu_to_le16(skb_transport_offset(skb) +
2531 (tcp_hdr(skb)->doff << 2));
2532 } else {
2533 check = &(udp_hdr(skb)->check);
2534 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2535 mac_iocb_ptr->total_hdrs_len =
2536 cpu_to_le16(skb_transport_offset(skb) +
2537 sizeof(struct udphdr));
2538 }
2539 *check = ~csum_tcpudp_magic(iph->saddr,
2540 iph->daddr, len, iph->protocol, 0);
2541}
2542
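/* Main transmit entry point: pick the TX ring from the skb's queue
 * mapping, build a MAC or TSO IOCB (including VLAN and checksum state),
 * map the buffers, then ring the producer index doorbell.
 */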
Stephen Hemminger613573252009-08-31 19:50:58 +00002543static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002544{
2545 struct tx_ring_desc *tx_ring_desc;
2546 struct ob_mac_iocb_req *mac_iocb_ptr;
2547 struct ql_adapter *qdev = netdev_priv(ndev);
2548 int tso;
2549 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002550 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002551
2552 tx_ring = &qdev->tx_ring[tx_ring_idx];
2553
Ron Mercer74c50b42009-03-09 10:59:27 +00002554 if (skb_padto(skb, ETH_ZLEN))
2555 return NETDEV_TX_OK;
2556
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002558 netif_info(qdev, tx_queued, qdev->ndev,
 2559			   "%s: shutting down tx queue %d due to lack of resources.\n",
2560 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002561 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002562 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002563 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002564 return NETDEV_TX_BUSY;
2565 }
2566 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2567 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002568 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002569
2570 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2571 mac_iocb_ptr->tid = tx_ring_desc->index;
2572 /* We use the upper 32-bits to store the tx queue for this IO.
2573 * When we get the completion we can use it to establish the context.
2574 */
2575 mac_iocb_ptr->txq_idx = tx_ring_idx;
2576 tx_ring_desc->skb = skb;
2577
2578 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2579
Jesse Grosseab6d182010-10-20 13:56:03 +00002580 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002581 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2582 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002583 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2584 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2585 }
2586 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2587 if (tso < 0) {
2588 dev_kfree_skb_any(skb);
2589 return NETDEV_TX_OK;
2590 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2591 ql_hw_csum_setup(skb,
2592 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2593 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002594 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2595 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002596 netif_err(qdev, tx_queued, qdev->ndev,
2597 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002598 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002599 return NETDEV_TX_BUSY;
2600 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002601 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2602 tx_ring->prod_idx++;
2603 if (tx_ring->prod_idx == tx_ring->wq_len)
2604 tx_ring->prod_idx = 0;
2605 wmb();
2606
2607 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002608 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2609 "tx queued, slot %d, len %d\n",
2610 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002611
2612 atomic_dec(&tx_ring->tx_count);
2613 return NETDEV_TX_OK;
2614}
2615
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002616
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002617static void ql_free_shadow_space(struct ql_adapter *qdev)
2618{
2619 if (qdev->rx_ring_shadow_reg_area) {
2620 pci_free_consistent(qdev->pdev,
2621 PAGE_SIZE,
2622 qdev->rx_ring_shadow_reg_area,
2623 qdev->rx_ring_shadow_reg_dma);
2624 qdev->rx_ring_shadow_reg_area = NULL;
2625 }
2626 if (qdev->tx_ring_shadow_reg_area) {
2627 pci_free_consistent(qdev->pdev,
2628 PAGE_SIZE,
2629 qdev->tx_ring_shadow_reg_area,
2630 qdev->tx_ring_shadow_reg_dma);
2631 qdev->tx_ring_shadow_reg_area = NULL;
2632 }
2633}
2634
2635static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2636{
2637 qdev->rx_ring_shadow_reg_area =
2638 pci_alloc_consistent(qdev->pdev,
2639 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2640 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002641 netif_err(qdev, ifup, qdev->ndev,
2642 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002643 return -ENOMEM;
2644 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002645 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002646 qdev->tx_ring_shadow_reg_area =
2647 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2648 &qdev->tx_ring_shadow_reg_dma);
2649 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002650 netif_err(qdev, ifup, qdev->ndev,
2651 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002652 goto err_wqp_sh_area;
2653 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002654 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002655 return 0;
2656
2657err_wqp_sh_area:
2658 pci_free_consistent(qdev->pdev,
2659 PAGE_SIZE,
2660 qdev->rx_ring_shadow_reg_area,
2661 qdev->rx_ring_shadow_reg_dma);
2662 return -ENOMEM;
2663}
2664
2665static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2666{
2667 struct tx_ring_desc *tx_ring_desc;
2668 int i;
2669 struct ob_mac_iocb_req *mac_iocb_ptr;
2670
2671 mac_iocb_ptr = tx_ring->wq_base;
2672 tx_ring_desc = tx_ring->q;
2673 for (i = 0; i < tx_ring->wq_len; i++) {
2674 tx_ring_desc->index = i;
2675 tx_ring_desc->skb = NULL;
2676 tx_ring_desc->queue_entry = mac_iocb_ptr;
2677 mac_iocb_ptr++;
2678 tx_ring_desc++;
2679 }
2680 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2681 atomic_set(&tx_ring->queue_stopped, 0);
2682}
2683
2684static void ql_free_tx_resources(struct ql_adapter *qdev,
2685 struct tx_ring *tx_ring)
2686{
2687 if (tx_ring->wq_base) {
2688 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2689 tx_ring->wq_base, tx_ring->wq_base_dma);
2690 tx_ring->wq_base = NULL;
2691 }
2692 kfree(tx_ring->q);
2693 tx_ring->q = NULL;
2694}
2695
2696static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2697 struct tx_ring *tx_ring)
2698{
2699 tx_ring->wq_base =
2700 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2701 &tx_ring->wq_base_dma);
2702
Joe Perches8e95a202009-12-03 07:58:21 +00002703 if ((tx_ring->wq_base == NULL) ||
2704 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002705 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002706 return -ENOMEM;
2707 }
2708 tx_ring->q =
2709 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2710 if (tx_ring->q == NULL)
2711 goto err;
2712
2713 return 0;
2714err:
2715 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2716 tx_ring->wq_base, tx_ring->wq_base_dma);
2717 return -ENOMEM;
2718}
2719
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002720static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002721{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002722 struct bq_desc *lbq_desc;
2723
Ron Mercer7c734352009-10-19 03:32:19 +00002724 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002725
Ron Mercer7c734352009-10-19 03:32:19 +00002726 curr_idx = rx_ring->lbq_curr_idx;
2727 clean_idx = rx_ring->lbq_clean_idx;
2728 while (curr_idx != clean_idx) {
2729 lbq_desc = &rx_ring->lbq[curr_idx];
2730
2731 if (lbq_desc->p.pg_chunk.last_flag) {
2732 pci_unmap_page(qdev->pdev,
2733 lbq_desc->p.pg_chunk.map,
2734 ql_lbq_block_size(qdev),
2735 PCI_DMA_FROMDEVICE);
2736 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002737 }
Ron Mercer7c734352009-10-19 03:32:19 +00002738
2739 put_page(lbq_desc->p.pg_chunk.page);
2740 lbq_desc->p.pg_chunk.page = NULL;
2741
2742 if (++curr_idx == rx_ring->lbq_len)
2743 curr_idx = 0;
2744
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002745 }
2746}
2747
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002748static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002749{
2750 int i;
2751 struct bq_desc *sbq_desc;
2752
2753 for (i = 0; i < rx_ring->sbq_len; i++) {
2754 sbq_desc = &rx_ring->sbq[i];
2755 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002756 netif_err(qdev, ifup, qdev->ndev,
2757 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002758 return;
2759 }
2760 if (sbq_desc->p.skb) {
2761 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002762 dma_unmap_addr(sbq_desc, mapaddr),
2763 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002764 PCI_DMA_FROMDEVICE);
2765 dev_kfree_skb(sbq_desc->p.skb);
2766 sbq_desc->p.skb = NULL;
2767 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002768 }
2769}
2770
Ron Mercer4545a3f2009-02-23 10:42:17 +00002771/* Free all large and small rx buffers associated
2772 * with the completion queues for this device.
2773 */
2774static void ql_free_rx_buffers(struct ql_adapter *qdev)
2775{
2776 int i;
2777 struct rx_ring *rx_ring;
2778
2779 for (i = 0; i < qdev->rx_ring_count; i++) {
2780 rx_ring = &qdev->rx_ring[i];
2781 if (rx_ring->lbq)
2782 ql_free_lbq_buffers(qdev, rx_ring);
2783 if (rx_ring->sbq)
2784 ql_free_sbq_buffers(qdev, rx_ring);
2785 }
2786}
2787
2788static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2789{
2790 struct rx_ring *rx_ring;
2791 int i;
2792
2793 for (i = 0; i < qdev->rx_ring_count; i++) {
2794 rx_ring = &qdev->rx_ring[i];
2795 if (rx_ring->type != TX_Q)
2796 ql_update_buffer_queues(qdev, rx_ring);
2797 }
2798}
2799
2800static void ql_init_lbq_ring(struct ql_adapter *qdev,
2801 struct rx_ring *rx_ring)
2802{
2803 int i;
2804 struct bq_desc *lbq_desc;
2805 __le64 *bq = rx_ring->lbq_base;
2806
2807 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2808 for (i = 0; i < rx_ring->lbq_len; i++) {
2809 lbq_desc = &rx_ring->lbq[i];
2810 memset(lbq_desc, 0, sizeof(*lbq_desc));
2811 lbq_desc->index = i;
2812 lbq_desc->addr = bq;
2813 bq++;
2814 }
2815}
2816
2817static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002818 struct rx_ring *rx_ring)
2819{
2820 int i;
2821 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002822 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002823
Ron Mercer4545a3f2009-02-23 10:42:17 +00002824 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002825 for (i = 0; i < rx_ring->sbq_len; i++) {
2826 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002827 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002828 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002829 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002830 bq++;
2831 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002832}
2833
2834static void ql_free_rx_resources(struct ql_adapter *qdev,
2835 struct rx_ring *rx_ring)
2836{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002837 /* Free the small buffer queue. */
2838 if (rx_ring->sbq_base) {
2839 pci_free_consistent(qdev->pdev,
2840 rx_ring->sbq_size,
2841 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2842 rx_ring->sbq_base = NULL;
2843 }
2844
2845 /* Free the small buffer queue control blocks. */
2846 kfree(rx_ring->sbq);
2847 rx_ring->sbq = NULL;
2848
2849 /* Free the large buffer queue. */
2850 if (rx_ring->lbq_base) {
2851 pci_free_consistent(qdev->pdev,
2852 rx_ring->lbq_size,
2853 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2854 rx_ring->lbq_base = NULL;
2855 }
2856
2857 /* Free the large buffer queue control blocks. */
2858 kfree(rx_ring->lbq);
2859 rx_ring->lbq = NULL;
2860
2861 /* Free the rx queue. */
2862 if (rx_ring->cq_base) {
2863 pci_free_consistent(qdev->pdev,
2864 rx_ring->cq_size,
2865 rx_ring->cq_base, rx_ring->cq_base_dma);
2866 rx_ring->cq_base = NULL;
2867 }
2868}
2869
2870/* Allocate queues and buffers for this completion queue based
2871 * on the values in the parameter structure. */
2872static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2873 struct rx_ring *rx_ring)
2874{
2875
2876 /*
2877 * Allocate the completion queue for this rx_ring.
2878 */
2879 rx_ring->cq_base =
2880 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2881 &rx_ring->cq_base_dma);
2882
2883 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002884 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002885 return -ENOMEM;
2886 }
2887
2888 if (rx_ring->sbq_len) {
2889 /*
2890 * Allocate small buffer queue.
2891 */
2892 rx_ring->sbq_base =
2893 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2894 &rx_ring->sbq_base_dma);
2895
2896 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002897 netif_err(qdev, ifup, qdev->ndev,
2898 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002899 goto err_mem;
2900 }
2901
2902 /*
2903 * Allocate small buffer queue control blocks.
2904 */
2905 rx_ring->sbq =
2906 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2907 GFP_KERNEL);
2908 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002909 netif_err(qdev, ifup, qdev->ndev,
2910 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002911 goto err_mem;
2912 }
2913
Ron Mercer4545a3f2009-02-23 10:42:17 +00002914 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002915 }
2916
2917 if (rx_ring->lbq_len) {
2918 /*
2919 * Allocate large buffer queue.
2920 */
2921 rx_ring->lbq_base =
2922 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2923 &rx_ring->lbq_base_dma);
2924
2925 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002926 netif_err(qdev, ifup, qdev->ndev,
2927 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002928 goto err_mem;
2929 }
2930 /*
2931 * Allocate large buffer queue control blocks.
2932 */
2933 rx_ring->lbq =
2934 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2935 GFP_KERNEL);
2936 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002937 netif_err(qdev, ifup, qdev->ndev,
2938 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002939 goto err_mem;
2940 }
2941
Ron Mercer4545a3f2009-02-23 10:42:17 +00002942 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002943 }
2944
2945 return 0;
2946
2947err_mem:
2948 ql_free_rx_resources(qdev, rx_ring);
2949 return -ENOMEM;
2950}
2951
2952static void ql_tx_ring_clean(struct ql_adapter *qdev)
2953{
2954 struct tx_ring *tx_ring;
2955 struct tx_ring_desc *tx_ring_desc;
2956 int i, j;
2957
2958 /*
2959 * Loop through all queues and free
2960 * any resources.
2961 */
2962 for (j = 0; j < qdev->tx_ring_count; j++) {
2963 tx_ring = &qdev->tx_ring[j];
2964 for (i = 0; i < tx_ring->wq_len; i++) {
2965 tx_ring_desc = &tx_ring->q[i];
2966 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002967 netif_err(qdev, ifdown, qdev->ndev,
2968 "Freeing lost SKB %p, from queue %d, index %d.\n",
2969 tx_ring_desc->skb, j,
2970 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002971 ql_unmap_send(qdev, tx_ring_desc,
2972 tx_ring_desc->map_cnt);
2973 dev_kfree_skb(tx_ring_desc->skb);
2974 tx_ring_desc->skb = NULL;
2975 }
2976 }
2977 }
2978}
2979
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002980static void ql_free_mem_resources(struct ql_adapter *qdev)
2981{
2982 int i;
2983
2984 for (i = 0; i < qdev->tx_ring_count; i++)
2985 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2986 for (i = 0; i < qdev->rx_ring_count; i++)
2987 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2988 ql_free_shadow_space(qdev);
2989}
2990
2991static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2992{
2993 int i;
2994
2995 /* Allocate space for our shadow registers and such. */
2996 if (ql_alloc_shadow_space(qdev))
2997 return -ENOMEM;
2998
2999 for (i = 0; i < qdev->rx_ring_count; i++) {
3000 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003001 netif_err(qdev, ifup, qdev->ndev,
3002 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003003 goto err_mem;
3004 }
3005 }
3006 /* Allocate tx queue resources */
3007 for (i = 0; i < qdev->tx_ring_count; i++) {
3008 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003009 netif_err(qdev, ifup, qdev->ndev,
3010 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003011 goto err_mem;
3012 }
3013 }
3014 return 0;
3015
3016err_mem:
3017 ql_free_mem_resources(qdev);
3018 return -ENOMEM;
3019}
3020
3021/* Set up the rx ring control block and pass it to the chip.
3022 * The control block is defined as
3023 * "Completion Queue Initialization Control Block", or cqicb.
3024 */
3025static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3026{
3027 struct cqicb *cqicb = &rx_ring->cqicb;
3028 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003029 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003030 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003031 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003032 void __iomem *doorbell_area =
3033 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3034 int err = 0;
3035 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003036 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003037 __le64 *base_indirect_ptr;
3038 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003039
3040 /* Set up the shadow registers for this ring. */
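	/* From the pointer arithmetic below, the per-ring shadow area holds,
	 * in order, the 8-byte completion queue producer index, the large
	 * buffer queue indirection list and then the small buffer queue
	 * indirection list.
	 */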
3041 rx_ring->prod_idx_sh_reg = shadow_reg;
3042 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003043 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003044 shadow_reg += sizeof(u64);
3045 shadow_reg_dma += sizeof(u64);
3046 rx_ring->lbq_base_indirect = shadow_reg;
3047 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003048 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3049 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003050 rx_ring->sbq_base_indirect = shadow_reg;
3051 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3052
3053 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003054 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003055 rx_ring->cnsmr_idx = 0;
3056 rx_ring->curr_entry = rx_ring->cq_base;
3057
3058 /* PCI doorbell mem area + 0x04 for valid register */
3059 rx_ring->valid_db_reg = doorbell_area + 0x04;
3060
3061 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003062 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003063
3064 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003065 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003066
3067 memset((void *)cqicb, 0, sizeof(struct cqicb));
3068 cqicb->msix_vect = rx_ring->irq;
3069
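	/* The length fields are 16 bits wide, so a full 65536-entry queue
	 * appears to be encoded as 0 for the hardware.
	 */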
Ron Mercer459caf52009-01-04 17:08:11 -08003070 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3071 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003072
Ron Mercer97345522009-01-09 11:31:50 +00003073 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003074
Ron Mercer97345522009-01-09 11:31:50 +00003075 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076
3077 /*
3078 * Set up the control block load flags.
3079 */
3080 cqicb->flags = FLAGS_LC | /* Load queue base address */
3081 FLAGS_LV | /* Load MSI-X vector */
3082 FLAGS_LI; /* Load irq delay values */
3083 if (rx_ring->lbq_len) {
3084 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003085 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003086 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003087 page_entries = 0;
3088 do {
3089 *base_indirect_ptr = cpu_to_le64(tmp);
3090 tmp += DB_PAGE_SIZE;
3091 base_indirect_ptr++;
3092 page_entries++;
3093 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003094 cqicb->lbq_addr =
3095 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003096 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3097 (u16) rx_ring->lbq_buf_size;
3098 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3099 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3100 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003101 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003102 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003103 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003104 rx_ring->lbq_clean_idx = 0;
3105 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003106 }
3107 if (rx_ring->sbq_len) {
3108 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003109 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003110 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003111 page_entries = 0;
3112 do {
3113 *base_indirect_ptr = cpu_to_le64(tmp);
3114 tmp += DB_PAGE_SIZE;
3115 base_indirect_ptr++;
3116 page_entries++;
3117 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003118 cqicb->sbq_addr =
3119 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003120 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003121 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003122 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3123 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003124 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003125 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003126 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003127 rx_ring->sbq_clean_idx = 0;
3128 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003129 }
3130 switch (rx_ring->type) {
3131 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003132 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3133 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3134 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003135 case RX_Q:
3136 /* Inbound completion handling rx_rings run in
3137 * separate NAPI contexts.
3138 */
3139 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3140 64);
3141 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3142 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3143 break;
3144 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003145 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3146 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003147 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003148 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3149 CFG_LCQ, rx_ring->cq_id);
3150 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003151 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003152 return err;
3153 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003154 return err;
3155}
3156
3157static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3158{
3159 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3160 void __iomem *doorbell_area =
3161 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3162 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3163 (tx_ring->wq_id * sizeof(u64));
3164 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3165 (tx_ring->wq_id * sizeof(u64));
3166 int err = 0;
3167
3168 /*
3169 * Assign doorbell registers for this tx_ring.
3170 */
3171 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003172 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003173 tx_ring->prod_idx = 0;
3174 /* TX PCI doorbell mem area + 0x04 */
3175 tx_ring->valid_db_reg = doorbell_area + 0x04;
3176
3177 /*
3178 * Assign shadow registers for this tx_ring.
3179 */
3180 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3181 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3182
3183 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3184 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3185 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3186 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3187 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003188 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003189
Ron Mercer97345522009-01-09 11:31:50 +00003190 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003191
3192 ql_init_tx_ring(qdev, tx_ring);
3193
Ron Mercere3324712009-07-02 06:06:13 +00003194 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003195 (u16) tx_ring->wq_id);
3196 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003197 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198 return err;
3199 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003200 return err;
3201}
3202
3203static void ql_disable_msix(struct ql_adapter *qdev)
3204{
3205 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3206 pci_disable_msix(qdev->pdev);
3207 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3208 kfree(qdev->msi_x_entry);
3209 qdev->msi_x_entry = NULL;
3210 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3211 pci_disable_msi(qdev->pdev);
3212 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3213 }
3214}
3215
Ron Mercera4ab6132009-08-27 11:02:10 +00003216/* We start by trying to get the number of vectors
3217 * stored in qdev->intr_count. If we don't get that
3218 * many then we reduce the count and try again.
3219 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003220static void ql_enable_msix(struct ql_adapter *qdev)
3221{
Ron Mercera4ab6132009-08-27 11:02:10 +00003222 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003223
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003224 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003225 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003226 /* Try to alloc space for the msix struct,
3227 * if it fails then go to MSI/legacy.
3228 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003229 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230 sizeof(struct msix_entry),
3231 GFP_KERNEL);
3232 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003233 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003234 goto msi;
3235 }
3236
Ron Mercera4ab6132009-08-27 11:02:10 +00003237 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003238 qdev->msi_x_entry[i].entry = i;
3239
Ron Mercera4ab6132009-08-27 11:02:10 +00003240 /* Loop to get our vectors. We start with
3241 * what we want and settle for what we get.
3242		 */
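		/* pci_enable_msix() returns 0 on success, a negative errno on
		 * failure, or a positive count of the vectors actually
		 * available when fewer than requested can be allocated; in
		 * that case we retry with the smaller count.
		 */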
3243 do {
3244 err = pci_enable_msix(qdev->pdev,
3245 qdev->msi_x_entry, qdev->intr_count);
3246 if (err > 0)
3247 qdev->intr_count = err;
3248 } while (err > 0);
3249
3250 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003251 kfree(qdev->msi_x_entry);
3252 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003253 netif_warn(qdev, ifup, qdev->ndev,
3254 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003255 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003256 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003257 } else if (err == 0) {
3258 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003259 netif_info(qdev, ifup, qdev->ndev,
3260 "MSI-X Enabled, got %d vectors.\n",
3261 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003262 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003263 }
3264 }
3265msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003266 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003267 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003268 if (!pci_enable_msi(qdev->pdev)) {
3269 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003270 netif_info(qdev, ifup, qdev->ndev,
3271 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003272 return;
3273 }
3274 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003275 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003276 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3277 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003278}
3279
Ron Mercer39aa8162009-08-27 11:02:11 +00003280/* Each vector services 1 RSS ring and 1 or more
3281 * TX completion rings. This function loops through
3282 * the TX completion rings and assigns the vector that
3283 * will service it. An example would be if there are
3284 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3285 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003286 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003287 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3288 */
3289static void ql_set_tx_vect(struct ql_adapter *qdev)
3290{
3291 int i, j, vect;
3292 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3293
3294 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3295 /* Assign irq vectors to TX rx_rings.*/
3296 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3297 i < qdev->rx_ring_count; i++) {
3298 if (j == tx_rings_per_vector) {
3299 vect++;
3300 j = 0;
3301 }
3302 qdev->rx_ring[i].irq = vect;
3303 j++;
3304 }
3305 } else {
3306 /* For single vector all rings have an irq
3307 * of zero.
3308 */
3309 for (i = 0; i < qdev->rx_ring_count; i++)
3310 qdev->rx_ring[i].irq = 0;
3311 }
3312}
3313
3314/* Set the interrupt mask for this vector. Each vector
3315 * will service 1 RSS ring and 1 or more TX completion
3316 * rings. This function sets up a bit mask per vector
3317 * that indicates which rings it services.
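 *
 * As a worked example (illustrative, using the cq_id layout set up in
 * ql_configure_rings(), RSS rings first and TX completion rings after):
 * with 2 vectors and 8 TX completion rings, tx_rings_per_vector is 4,
 * so vector 0 ends up with mask bits 0 and 2-5 (0x3d) and vector 1 with
 * bits 1 and 6-9 (0x3c2).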
3318 */
3319static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3320{
3321 int j, vect = ctx->intr;
3322 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3323
3324 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3325 /* Add the RSS ring serviced by this vector
3326 * to the mask.
3327 */
3328 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3329 /* Add the TX ring(s) serviced by this vector
3330 * to the mask. */
3331 for (j = 0; j < tx_rings_per_vector; j++) {
3332 ctx->irq_mask |=
3333 (1 << qdev->rx_ring[qdev->rss_ring_count +
3334 (vect * tx_rings_per_vector) + j].cq_id);
3335 }
3336 } else {
3337 /* For single vector we just shift each queue's
3338 * ID into the mask.
3339 */
3340 for (j = 0; j < qdev->rx_ring_count; j++)
3341 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3342 }
3343}
3344
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003345/*
3346 * Here we build the intr_context structures based on
3347 * our rx_ring count and intr vector count.
3348 * The intr_context structure is used to hook each vector
3349 * to possibly different handlers.
3350 */
3351static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3352{
3353 int i = 0;
3354 struct intr_context *intr_context = &qdev->intr_context[0];
3355
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003356 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3357		/* Each rx_ring has its
3358 * own intr_context since we have separate
3359 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003360 */
3361 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3362 qdev->rx_ring[i].irq = i;
3363 intr_context->intr = i;
3364 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003365 /* Set up this vector's bit-mask that indicates
3366 * which queues it services.
3367 */
3368 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003369 /*
3370 * We set up each vectors enable/disable/read bits so
3371 * there's no bit/mask calculations in the critical path.
3372 */
3373 intr_context->intr_en_mask =
3374 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3375 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3376 | i;
3377 intr_context->intr_dis_mask =
3378 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3379 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3380 INTR_EN_IHD | i;
3381 intr_context->intr_read_mask =
3382 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3383 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3384 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003385 if (i == 0) {
3386 /* The first vector/queue handles
3387 * broadcast/multicast, fatal errors,
3388				 * and firmware events. This is in addition
3389 * to normal inbound NAPI processing.
3390 */
3391 intr_context->handler = qlge_isr;
3392 sprintf(intr_context->name, "%s-rx-%d",
3393 qdev->ndev->name, i);
3394 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003395 /*
3396 * Inbound queues handle unicast frames only.
3397 */
3398 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003399 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003400 qdev->ndev->name, i);
3401 }
3402 }
3403 } else {
3404 /*
3405 * All rx_rings use the same intr_context since
3406 * there is only one vector.
3407 */
3408 intr_context->intr = 0;
3409 intr_context->qdev = qdev;
3410 /*
3411		 * We set up each vector's enable/disable/read bits so
3412		 * there are no bit/mask calculations in the critical path.
3413 */
3414 intr_context->intr_en_mask =
3415 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3416 intr_context->intr_dis_mask =
3417 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3418 INTR_EN_TYPE_DISABLE;
3419 intr_context->intr_read_mask =
3420 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3421 /*
3422 * Single interrupt means one handler for all rings.
3423 */
3424 intr_context->handler = qlge_isr;
3425 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003426 /* Set up this vector's bit-mask that indicates
3427 * which queues it services. In this case there is
3428 * a single vector so it will service all RSS and
3429 * TX completion rings.
3430 */
3431 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003432 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003433	/* Tell the TX completion rings which MSI-X vector
3434 * they will be using.
3435 */
3436 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003437}
3438
3439static void ql_free_irq(struct ql_adapter *qdev)
3440{
3441 int i;
3442 struct intr_context *intr_context = &qdev->intr_context[0];
3443
3444 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3445 if (intr_context->hooked) {
3446 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3447 free_irq(qdev->msi_x_entry[i].vector,
3448 &qdev->rx_ring[i]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003449 } else {
3450 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003451 }
3452 }
3453 }
3454 ql_disable_msix(qdev);
3455}
3456
3457static int ql_request_irq(struct ql_adapter *qdev)
3458{
3459 int i;
3460 int status = 0;
3461 struct pci_dev *pdev = qdev->pdev;
3462 struct intr_context *intr_context = &qdev->intr_context[0];
3463
3464 ql_resolve_queues_to_irqs(qdev);
3465
3466 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3467 atomic_set(&intr_context->irq_cnt, 0);
3468 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3469 status = request_irq(qdev->msi_x_entry[i].vector,
3470 intr_context->handler,
3471 0,
3472 intr_context->name,
3473 &qdev->rx_ring[i]);
3474 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003475 netif_err(qdev, ifup, qdev->ndev,
3476 "Failed request for MSIX interrupt %d.\n",
3477 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003478 goto err_irq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003479 }
3480 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003481 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3482 "trying msi or legacy interrupts.\n");
3483 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3484 "%s: irq = %d.\n", __func__, pdev->irq);
3485 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3486 "%s: context->name = %s.\n", __func__,
3487 intr_context->name);
3488 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3489 "%s: dev_id = 0x%p.\n", __func__,
3490 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003491 status =
3492 request_irq(pdev->irq, qlge_isr,
3493 test_bit(QL_MSI_ENABLED,
3494 &qdev->
3495 flags) ? 0 : IRQF_SHARED,
3496 intr_context->name, &qdev->rx_ring[0]);
3497 if (status)
3498 goto err_irq;
3499
Joe Perchesae9540f72010-02-09 11:49:52 +00003500 netif_err(qdev, ifup, qdev->ndev,
3501 "Hooked intr %d, queue type %s, with name %s.\n",
3502 i,
3503 qdev->rx_ring[0].type == DEFAULT_Q ?
3504 "DEFAULT_Q" :
3505 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3506 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3507 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003508 }
3509 intr_context->hooked = 1;
3510 }
3511 return status;
3512err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003513	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003514 ql_free_irq(qdev);
3515 return status;
3516}
3517
3518static int ql_start_rss(struct ql_adapter *qdev)
3519{
Joe Perches215faf92010-12-21 02:16:10 -08003520 static const u8 init_hash_seed[] = {
3521 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3522 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3523 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3524 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3525 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3526 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003527 struct ricb *ricb = &qdev->ricb;
3528 int status = 0;
3529 int i;
3530 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3531
Ron Mercere3324712009-07-02 06:06:13 +00003532 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003533
Ron Mercerb2014ff2009-08-27 11:02:09 +00003534 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003535 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003536 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3537 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003538
3539 /*
3540 * Fill out the Indirection Table.
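	 * Each of its 1024 entries maps an RSS hash bucket to a completion
	 * queue; with 4 RSS rings, for example, the buckets map to queues
	 * 0,1,2,3,0,1,2,3,... (an even spread assumes rss_ring_count is a
	 * power of two).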
3541 */
Ron Mercer541ae282009-10-08 09:54:37 +00003542 for (i = 0; i < 1024; i++)
3543 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003544
Ron Mercer541ae282009-10-08 09:54:37 +00003545 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3546 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003547
Ron Mercere3324712009-07-02 06:06:13 +00003548 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003549 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003550 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003551 return status;
3552 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003553 return status;
3554}
3555
Ron Mercera5f59dc2009-07-02 06:06:07 +00003556static int ql_clear_routing_entries(struct ql_adapter *qdev)
3557{
3558 int i, status = 0;
3559
3560 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3561 if (status)
3562 return status;
3563 /* Clear all the entries in the routing table. */
3564 for (i = 0; i < 16; i++) {
3565 status = ql_set_routing_reg(qdev, i, 0, 0);
3566 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003567 netif_err(qdev, ifup, qdev->ndev,
3568 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003569 break;
3570 }
3571 }
3572 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3573 return status;
3574}
3575
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003576/* Initialize the frame-to-queue routing. */
3577static int ql_route_initialize(struct ql_adapter *qdev)
3578{
3579 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003580
3581 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003582 status = ql_clear_routing_entries(qdev);
3583 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003584 return status;
3585
3586 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3587 if (status)
3588 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003589
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003590 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3591 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003592 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003593 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003594 "Failed to init routing register "
3595 "for IP CSUM error packets.\n");
3596 goto exit;
3597 }
3598 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3599 RT_IDX_TU_CSUM_ERR, 1);
3600 if (status) {
3601 netif_err(qdev, ifup, qdev->ndev,
3602 "Failed to init routing register "
3603 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003604 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003605 }
3606 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3607 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003608 netif_err(qdev, ifup, qdev->ndev,
3609 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003610 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003611 }
3612 /* If we have more than one inbound queue, then turn on RSS in the
3613 * routing block.
3614 */
3615 if (qdev->rss_ring_count > 1) {
3616 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3617 RT_IDX_RSS_MATCH, 1);
3618 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003619 netif_err(qdev, ifup, qdev->ndev,
3620 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003621 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003622 }
3623 }
3624
3625 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3626 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003627 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003628 netif_err(qdev, ifup, qdev->ndev,
3629 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003630exit:
3631 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003632 return status;
3633}
3634
Ron Mercer2ee1e272009-03-03 12:10:33 +00003635int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003636{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003637 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003638
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003639	/* Check if the link is up and use that to
3640 * determine if we are setting or clearing
3641 * the MAC address in the CAM.
3642 */
3643 set = ql_read32(qdev, STS);
3644 set &= qdev->port_link_up;
3645 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003646 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003647 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003648 return status;
3649 }
3650
3651 status = ql_route_initialize(qdev);
3652 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003653 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003654
3655 return status;
3656}
3657
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003658static int ql_adapter_initialize(struct ql_adapter *qdev)
3659{
3660 u32 value, mask;
3661 int i;
3662 int status = 0;
3663
3664 /*
3665 * Set up the System register to halt on errors.
3666 */
3667 value = SYS_EFE | SYS_FAE;
3668 mask = value << 16;
3669 ql_write32(qdev, SYS, mask | value);
3670
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003671 /* Set the default queue, and VLAN behavior. */
3672 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3673 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003674 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3675
3676 /* Set the MPI interrupt to enabled. */
3677 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3678
3679 /* Enable the function, set pagesize, enable error checking. */
3680 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003681 FSC_EC | FSC_VM_PAGE_4K;
3682 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003683
3684 /* Set/clear header splitting. */
3685 mask = FSC_VM_PAGESIZE_MASK |
3686 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3687 ql_write32(qdev, FSC, mask | value);
3688
Ron Mercer572c5262010-01-02 10:37:42 +00003689 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003690
Ron Mercera3b71932009-10-08 09:54:38 +00003691 /* Set RX packet routing to use port/pci function on which the
3692	 * packet arrived, in addition to the usual frame routing.
3693 * This is helpful on bonding where both interfaces can have
3694 * the same MAC address.
3695 */
3696 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003697 /* Reroute all packets to our Interface.
3698 * They may have been routed to MPI firmware
3699 * due to WOL.
3700 */
3701 value = ql_read32(qdev, MGMT_RCV_CFG);
3702 value &= ~MGMT_RCV_CFG_RM;
3703 mask = 0xffff0000;
3704
3705 /* Sticky reg needs clearing due to WOL. */
3706 ql_write32(qdev, MGMT_RCV_CFG, mask);
3707 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3708
3709	/* Default WOL is enabled on Mezz cards */
3710 if (qdev->pdev->subsystem_device == 0x0068 ||
3711 qdev->pdev->subsystem_device == 0x0180)
3712 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003713
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003714 /* Start up the rx queues. */
3715 for (i = 0; i < qdev->rx_ring_count; i++) {
3716 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3717 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003718 netif_err(qdev, ifup, qdev->ndev,
3719 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003720 return status;
3721 }
3722 }
3723
3724 /* If there is more than one inbound completion queue
3725 * then download a RICB to configure RSS.
3726 */
3727 if (qdev->rss_ring_count > 1) {
3728 status = ql_start_rss(qdev);
3729 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003730 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003731 return status;
3732 }
3733 }
3734
3735 /* Start up the tx queues. */
3736 for (i = 0; i < qdev->tx_ring_count; i++) {
3737 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3738 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003739 netif_err(qdev, ifup, qdev->ndev,
3740 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003741 return status;
3742 }
3743 }
3744
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003745 /* Initialize the port and set the max framesize. */
3746 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003747 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003748 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003749
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003750 /* Set up the MAC address and frame routing filter. */
3751 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003752 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003753 netif_err(qdev, ifup, qdev->ndev,
3754 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003755 return status;
3756 }
3757
3758 /* Start NAPI for the RSS queues. */
Jitendra Kalsaria19257f52012-02-03 14:06:50 +00003759 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003760 napi_enable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003761
3762 return status;
3763}
3764
3765/* Issue soft reset to chip. */
3766static int ql_adapter_reset(struct ql_adapter *qdev)
3767{
3768 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003769 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003770 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003771
Ron Mercera5f59dc2009-07-02 06:06:07 +00003772 /* Clear all the entries in the routing table. */
3773 status = ql_clear_routing_entries(qdev);
3774 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003775 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003776 return status;
3777 }
3778
3779 end_jiffies = jiffies +
3780 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003781
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003782	/* If the recovery bit is set, skip the mailbox command and
3783	 * clear the bit; otherwise we are in the normal reset process.
3784 */
3785 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3786 /* Stop management traffic. */
3787 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003788
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003789 /* Wait for the NIC and MGMNT FIFOs to empty. */
3790 ql_wait_fifo_empty(qdev);
3791 } else
3792 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003793
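	/* Request the function reset. The upper 16 bits follow the
	 * mask-in-the-high-half register convention used elsewhere in this
	 * driver; the chip clears RST_FO_FR when the reset completes, which
	 * the loop below polls for.
	 */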
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003794 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003795
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003796 do {
3797 value = ql_read32(qdev, RST_FO);
3798 if ((value & RST_FO_FR) == 0)
3799 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003800 cpu_relax();
3801 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003802
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003803 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003804 netif_err(qdev, ifdown, qdev->ndev,
3805 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003806 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003807 }
3808
Ron Mercer84087f42009-10-08 09:54:41 +00003809 /* Resume management traffic. */
3810 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003811 return status;
3812}
3813
3814static void ql_display_dev_info(struct net_device *ndev)
3815{
Joe Perchesb16fed02010-11-15 11:12:28 +00003816 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003817
Joe Perchesae9540f72010-02-09 11:49:52 +00003818 netif_info(qdev, probe, qdev->ndev,
3819 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3820 "XG Roll = %d, XG Rev = %d.\n",
3821 qdev->func,
3822 qdev->port,
3823 qdev->chip_rev_id & 0x0000000f,
3824 qdev->chip_rev_id >> 4 & 0x0000000f,
3825 qdev->chip_rev_id >> 8 & 0x0000000f,
3826 qdev->chip_rev_id >> 12 & 0x0000000f);
3827 netif_info(qdev, probe, qdev->ndev,
3828 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003829}
3830
stephen hemmingerac409212010-10-21 07:50:54 +00003831static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003832{
3833 int status = 0;
3834 u32 wol = MB_WOL_DISABLE;
3835
3836 /* The CAM is still intact after a reset, but if we
3837 * are doing WOL, then we may need to program the
3838 * routing regs. We would also need to issue the mailbox
3839 * commands to instruct the MPI what to do per the ethtool
3840 * settings.
3841 */
3842
3843 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3844 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003845 netif_err(qdev, ifdown, qdev->ndev,
3846			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3847 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003848 return -EINVAL;
3849 }
3850
3851 if (qdev->wol & WAKE_MAGIC) {
3852 status = ql_mb_wol_set_magic(qdev, 1);
3853 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003854 netif_err(qdev, ifdown, qdev->ndev,
3855 "Failed to set magic packet on %s.\n",
3856 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003857 return status;
3858 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003859 netif_info(qdev, drv, qdev->ndev,
3860 "Enabled magic packet successfully on %s.\n",
3861 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003862
3863 wol |= MB_WOL_MAGIC_PKT;
3864 }
3865
3866 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003867 wol |= MB_WOL_MODE_ON;
3868 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003869 netif_err(qdev, drv, qdev->ndev,
3870 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003871 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003872 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003873 }
3874
3875 return status;
3876}
3877
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003878static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003879{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003880
Ron Mercer6497b602009-02-12 16:37:13 -08003881 /* Don't kill the reset worker thread if we
3882 * are in the process of recovery.
3883 */
3884 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3885 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003886 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3887 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003888 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003889 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003890 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003891}
3892
3893static int ql_adapter_down(struct ql_adapter *qdev)
3894{
3895 int i, status = 0;
3896
3897 ql_link_off(qdev);
3898
3899 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003900
Ron Mercer39aa8162009-08-27 11:02:11 +00003901 for (i = 0; i < qdev->rss_ring_count; i++)
3902 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003903
3904 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3905
3906 ql_disable_interrupts(qdev);
3907
3908 ql_tx_ring_clean(qdev);
3909
Ron Mercer6b318cb2009-03-09 10:59:26 +00003910	/* Call netif_napi_del() from a common point.
3911 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003912 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003913 netif_napi_del(&qdev->rx_ring[i].napi);
3914
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003915 status = ql_adapter_reset(qdev);
3916 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003917 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3918 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003919 ql_free_rx_buffers(qdev);
3920
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003921 return status;
3922}
3923
3924static int ql_adapter_up(struct ql_adapter *qdev)
3925{
3926 int err = 0;
3927
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003928 err = ql_adapter_initialize(qdev);
3929 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003930 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003931 goto err_init;
3932 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003933 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003934 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003935 /* If the port is initialized and the
3936	 * link is up then turn on the carrier.
3937 */
3938 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3939 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003940 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00003941 /* Restore rx mode. */
3942 clear_bit(QL_ALLMULTI, &qdev->flags);
3943 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3944 qlge_set_multicast_list(qdev->ndev);
3945
Ron Mercerc1b60092010-10-27 04:58:12 +00003946 /* Restore vlan setting. */
3947 qlge_restore_vlan(qdev);
3948
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003949 ql_enable_interrupts(qdev);
3950 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003951 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003952
3953 return 0;
3954err_init:
3955 ql_adapter_reset(qdev);
3956 return err;
3957}
3958
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003959static void ql_release_adapter_resources(struct ql_adapter *qdev)
3960{
3961 ql_free_mem_resources(qdev);
3962 ql_free_irq(qdev);
3963}
3964
3965static int ql_get_adapter_resources(struct ql_adapter *qdev)
3966{
3967 int status = 0;
3968
3969 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003970 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003971 return -ENOMEM;
3972 }
3973 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003974 return status;
3975}
3976
3977static int qlge_close(struct net_device *ndev)
3978{
3979 struct ql_adapter *qdev = netdev_priv(ndev);
3980
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003981 /* If we hit pci_channel_io_perm_failure
3982 * failure condition, then we already
3983 * brought the adapter down.
3984 */
3985 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003986 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003987 clear_bit(QL_EEH_FATAL, &qdev->flags);
3988 return 0;
3989 }
3990
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003991 /*
3992 * Wait for device to recover from a reset.
3993 * (Rarely happens, but possible.)
3994 */
3995 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3996 msleep(1);
3997 ql_adapter_down(qdev);
3998 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003999 return 0;
4000}
4001
4002static int ql_configure_rings(struct ql_adapter *qdev)
4003{
4004 int i;
4005 struct rx_ring *rx_ring;
4006 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004007 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004008 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4009 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4010
4011 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004012
Ron Mercera4ab6132009-08-27 11:02:10 +00004013 /* In a perfect world we have one RSS ring for each CPU
4014	 * and each has its own vector. To do that we ask for
4015 * cpu_cnt vectors. ql_enable_msix() will adjust the
4016 * vector count to what we actually get. We then
4017 * allocate an RSS ring for each.
4018 * Essentially, we are doing min(cpu_count, msix_vector_count).
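	 *
	 * For example (illustrative), if cpu_cnt works out to 4 and all 4
	 * MSI-X vectors are granted, this yields 4 RSS rings, 4 TX rings
	 * and 8 rx_rings in total, the last 4 being the TX completion
	 * queues with cq_ids 4..7.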
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004019 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004020 qdev->intr_count = cpu_cnt;
4021 ql_enable_msix(qdev);
4022 /* Adjust the RSS ring count to the actual vector count. */
4023 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004024 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004025 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004026
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004027 for (i = 0; i < qdev->tx_ring_count; i++) {
4028 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004029 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004030 tx_ring->qdev = qdev;
4031 tx_ring->wq_id = i;
4032 tx_ring->wq_len = qdev->tx_ring_size;
4033 tx_ring->wq_size =
4034 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4035
4036 /*
4037		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004038 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004039 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004040 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004041 }
4042
4043 for (i = 0; i < qdev->rx_ring_count; i++) {
4044 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004045 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004046 rx_ring->qdev = qdev;
4047 rx_ring->cq_id = i;
4048 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004049 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004050 /*
4051 * Inbound (RSS) queues.
4052 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004053 rx_ring->cq_len = qdev->rx_ring_size;
4054 rx_ring->cq_size =
4055 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4056 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4057 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004058 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004059 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004060 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4061 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004062 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004063 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004064 rx_ring->type = RX_Q;
4065 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004066 /*
4067 * Outbound queue handles outbound completions only.
4068 */
4069 /* outbound cq is same size as tx_ring it services. */
4070 rx_ring->cq_len = qdev->tx_ring_size;
4071 rx_ring->cq_size =
4072 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4073 rx_ring->lbq_len = 0;
4074 rx_ring->lbq_size = 0;
4075 rx_ring->lbq_buf_size = 0;
4076 rx_ring->sbq_len = 0;
4077 rx_ring->sbq_size = 0;
4078 rx_ring->sbq_buf_size = 0;
4079 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004080 }
4081 }
4082 return 0;
4083}
4084
4085static int qlge_open(struct net_device *ndev)
4086{
4087 int err = 0;
4088 struct ql_adapter *qdev = netdev_priv(ndev);
4089
Ron Mercer74e12432009-11-11 12:54:04 +00004090 err = ql_adapter_reset(qdev);
4091 if (err)
4092 return err;
4093
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004094 err = ql_configure_rings(qdev);
4095 if (err)
4096 return err;
4097
4098 err = ql_get_adapter_resources(qdev);
4099 if (err)
4100 goto error_up;
4101
4102 err = ql_adapter_up(qdev);
4103 if (err)
4104 goto error_up;
4105
4106 return err;
4107
4108error_up:
4109 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004110 return err;
4111}
4112
Ron Mercer7c734352009-10-19 03:32:19 +00004113static int ql_change_rx_buffers(struct ql_adapter *qdev)
4114{
4115 struct rx_ring *rx_ring;
4116 int i, status;
4117 u32 lbq_buf_len;
4118
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004119 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004120 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4121 int i = 3;
4122 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004123 netif_err(qdev, ifup, qdev->ndev,
4124 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004125 ssleep(1);
4126 }
4127
4128 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004129 netif_err(qdev, ifup, qdev->ndev,
4130 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004131 return -ETIMEDOUT;
4132 }
4133 }
4134
4135 status = ql_adapter_down(qdev);
4136 if (status)
4137 goto error;
4138
4139 /* Get the new rx buffer size. */
4140 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4141 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4142 qdev->lbq_buf_order = get_order(lbq_buf_len);
4143
4144 for (i = 0; i < qdev->rss_ring_count; i++) {
4145 rx_ring = &qdev->rx_ring[i];
4146 /* Set the new size. */
4147 rx_ring->lbq_buf_size = lbq_buf_len;
4148 }
4149
4150 status = ql_adapter_up(qdev);
4151 if (status)
4152 goto error;
4153
4154 return status;
4155error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004156 netif_alert(qdev, ifup, qdev->ndev,
4157 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004158 set_bit(QL_ADAPTER_UP, &qdev->flags);
4159 dev_close(qdev->ndev);
4160 return status;
4161}
4162
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004163static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4164{
4165 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004166 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004167
4168 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004169 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004170 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004171 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004172 } else
4173 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004174
4175 queue_delayed_work(qdev->workqueue,
4176 &qdev->mpi_port_cfg_work, 3*HZ);
4177
Breno Leitao746079d2010-02-04 10:11:19 +00004178 ndev->mtu = new_mtu;
4179
Ron Mercer7c734352009-10-19 03:32:19 +00004180 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004181 return 0;
4182 }
4183
Ron Mercer7c734352009-10-19 03:32:19 +00004184 status = ql_change_rx_buffers(qdev);
4185 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004186 netif_err(qdev, ifup, qdev->ndev,
4187 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004188 }
4189
4190 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004191}
4192
4193static struct net_device_stats *qlge_get_stats(struct net_device
4194 *ndev)
4195{
Ron Mercer885ee392009-11-03 13:49:31 +00004196 struct ql_adapter *qdev = netdev_priv(ndev);
4197 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4198 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4199 unsigned long pkts, mcast, dropped, errors, bytes;
4200 int i;
4201
4202 /* Get RX stats. */
4203 pkts = mcast = dropped = errors = bytes = 0;
4204 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4205 pkts += rx_ring->rx_packets;
4206 bytes += rx_ring->rx_bytes;
4207 dropped += rx_ring->rx_dropped;
4208 errors += rx_ring->rx_errors;
4209 mcast += rx_ring->rx_multicast;
4210 }
4211 ndev->stats.rx_packets = pkts;
4212 ndev->stats.rx_bytes = bytes;
4213 ndev->stats.rx_dropped = dropped;
4214 ndev->stats.rx_errors = errors;
4215 ndev->stats.multicast = mcast;
4216
4217 /* Get TX stats. */
4218 pkts = errors = bytes = 0;
4219 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4220 pkts += tx_ring->tx_packets;
4221 bytes += tx_ring->tx_bytes;
4222 errors += tx_ring->tx_errors;
4223 }
4224 ndev->stats.tx_packets = pkts;
4225 ndev->stats.tx_bytes = bytes;
4226 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004227 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004228}
4229
stephen hemmingerac409212010-10-21 07:50:54 +00004230static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004231{
Joe Perchesb16fed02010-11-15 11:12:28 +00004232 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004233 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004234 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004235
Ron Mercercc288f52009-02-23 10:42:14 +00004236 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4237 if (status)
4238 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004239 /*
4240 * Set or clear promiscuous mode if a
4241 * transition is taking place.
4242 */
4243 if (ndev->flags & IFF_PROMISC) {
4244 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4245 if (ql_set_routing_reg
4246 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004247 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004248 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004249 } else {
4250 set_bit(QL_PROMISCUOUS, &qdev->flags);
4251 }
4252 }
4253 } else {
4254 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4255 if (ql_set_routing_reg
4256 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004257 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004258 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004259 } else {
4260 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4261 }
4262 }
4263 }
4264
4265 /*
4266 * Set or clear all multicast mode if a
4267 * transition is taking place.
4268 */
4269 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004270 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004271 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4272 if (ql_set_routing_reg
4273 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004274 netif_err(qdev, hw, qdev->ndev,
4275 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004276 } else {
4277 set_bit(QL_ALLMULTI, &qdev->flags);
4278 }
4279 }
4280 } else {
4281 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4282 if (ql_set_routing_reg
4283 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004284 netif_err(qdev, hw, qdev->ndev,
4285 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004286 } else {
4287 clear_bit(QL_ALLMULTI, &qdev->flags);
4288 }
4289 }
4290 }
4291
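	/* Load each multicast address into the hardware address filter,
	 * then enable multicast-match routing.
	 */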
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004292 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004293 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4294 if (status)
4295 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004296 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004297 netdev_for_each_mc_addr(ha, ndev) {
4298 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004299 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004300 netif_err(qdev, hw, qdev->ndev,
 4301 					"Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004302 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004303 goto exit;
4304 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004305 i++;
4306 }
Ron Mercercc288f52009-02-23 10:42:14 +00004307 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004308 if (ql_set_routing_reg
4309 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004310 netif_err(qdev, hw, qdev->ndev,
4311 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004312 } else {
4313 set_bit(QL_ALLMULTI, &qdev->flags);
4314 }
4315 }
4316exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004317 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004318}
4319
4320static int qlge_set_mac_address(struct net_device *ndev, void *p)
4321{
Joe Perchesb16fed02010-11-15 11:12:28 +00004322 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004323 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004324 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004325
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004326 if (!is_valid_ether_addr(addr->sa_data))
4327 return -EADDRNOTAVAIL;
4328 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004329 /* Update local copy of current mac address. */
4330 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004331
Ron Mercercc288f52009-02-23 10:42:14 +00004332 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4333 if (status)
4334 return status;
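	/* Program the new address into this function's CAM slot
	 * (index = func * MAX_CQ).
	 */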
Ron Mercercc288f52009-02-23 10:42:14 +00004335 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4336 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004337 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004338 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004339 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4340 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004341}
4342
4343static void qlge_tx_timeout(struct net_device *ndev)
4344{
Joe Perchesb16fed02010-11-15 11:12:28 +00004345 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004346 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004347}
4348
4349static void ql_asic_reset_work(struct work_struct *work)
4350{
4351 struct ql_adapter *qdev =
4352 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004353 int status;
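	/* Cycle the adapter down and back up under rtnl_lock to recover
	 * from an ASIC error.
	 */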
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004354 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004355 status = ql_adapter_down(qdev);
4356 if (status)
4357 goto error;
4358
4359 status = ql_adapter_up(qdev);
4360 if (status)
4361 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004362
4363 /* Restore rx mode. */
4364 clear_bit(QL_ALLMULTI, &qdev->flags);
4365 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4366 qlge_set_multicast_list(qdev->ndev);
4367
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004368 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004369 return;
4370error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004371 netif_alert(qdev, ifup, qdev->ndev,
4372 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004373
Ron Mercerdb988122009-03-09 10:59:17 +00004374 set_bit(QL_ADAPTER_UP, &qdev->flags);
4375 dev_close(qdev->ndev);
4376 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004377}
4378
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004379static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004380 .get_flash = ql_get_8012_flash_params,
4381 .port_initialize = ql_8012_port_initialize,
4382};
4383
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004384static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004385 .get_flash = ql_get_8000_flash_params,
4386 .port_initialize = ql_8000_port_initialize,
4387};
4388
Ron Mercere4552f52009-06-09 05:39:32 +00004389/* Find the PCIe function number for the other NIC
 4390 * on this chip. Since both NIC functions share a
 4391 * common firmware, we have the lowest enabled function
4392 * do any common work. Examples would be resetting
4393 * after a fatal firmware error, or doing a firmware
4394 * coredump.
4395 */
4396static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004397{
Ron Mercere4552f52009-06-09 05:39:32 +00004398 int status = 0;
4399 u32 temp;
4400 u32 nic_func1, nic_func2;
4401
4402 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4403 &temp);
4404 if (status)
4405 return status;
4406
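	/* Both NIC function numbers live in the test/port config word;
	 * extract each field.
	 */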
4407 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4408 MPI_TEST_NIC_FUNC_MASK);
4409 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4410 MPI_TEST_NIC_FUNC_MASK);
4411
4412 if (qdev->func == nic_func1)
4413 qdev->alt_func = nic_func2;
4414 else if (qdev->func == nic_func2)
4415 qdev->alt_func = nic_func1;
4416 else
4417 status = -EIO;
4418
4419 return status;
4420}
4421
4422static int ql_get_board_info(struct ql_adapter *qdev)
4423{
4424 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004425 qdev->func =
4426 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004427 if (qdev->func > 3)
4428 return -EIO;
4429
4430 status = ql_get_alt_pcie_func(qdev);
4431 if (status)
4432 return status;
4433
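	/* The lower-numbered PCI function is port 0; the other is port 1. */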
4434 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4435 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004436 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4437 qdev->port_link_up = STS_PL1;
4438 qdev->port_init = STS_PI1;
4439 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4440 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4441 } else {
4442 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4443 qdev->port_link_up = STS_PL0;
4444 qdev->port_init = STS_PI0;
4445 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4446 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4447 }
4448 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004449 qdev->device_id = qdev->pdev->device;
4450 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4451 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004452 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4453 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004454 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004455}
4456
4457static void ql_release_all(struct pci_dev *pdev)
4458{
4459 struct net_device *ndev = pci_get_drvdata(pdev);
4460 struct ql_adapter *qdev = netdev_priv(ndev);
4461
4462 if (qdev->workqueue) {
4463 destroy_workqueue(qdev->workqueue);
4464 qdev->workqueue = NULL;
4465 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004466
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004467 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004468 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004469 if (qdev->doorbell_area)
4470 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004471 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004472 pci_release_regions(pdev);
4473 pci_set_drvdata(pdev, NULL);
4474}
4475
4476static int __devinit ql_init_device(struct pci_dev *pdev,
4477 struct net_device *ndev, int cards_found)
4478{
4479 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004480 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004481
Ron Mercere3324712009-07-02 06:06:13 +00004482 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004483 err = pci_enable_device(pdev);
4484 if (err) {
4485 dev_err(&pdev->dev, "PCI device enable failed.\n");
4486 return err;
4487 }
4488
Ron Mercerebd6e772009-09-29 08:39:25 +00004489 qdev->ndev = ndev;
4490 qdev->pdev = pdev;
4491 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004492
Ron Mercerbc9167f2009-10-10 09:35:04 +00004493 /* Set PCIe read request size */
4494 err = pcie_set_readrq(pdev, 4096);
4495 if (err) {
4496 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004497 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004498 }
4499
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004500 err = pci_request_regions(pdev, DRV_NAME);
4501 if (err) {
4502 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004503 		goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004504 }
4505
4506 pci_set_master(pdev);
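	/* Prefer 64-bit DMA; fall back to a 32-bit mask if the platform
	 * can't do it.
	 */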
Yang Hongyang6a355282009-04-06 19:01:13 -07004507 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004508 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004509 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004510 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004511 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004512 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004513 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004514 }
4515
4516 if (err) {
4517 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004518 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004519 }
4520
Ron Mercer73475332009-11-06 07:44:58 +00004521 /* Set PCIe reset type for EEH to fundamental. */
4522 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004523 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004524 qdev->reg_base =
4525 ioremap_nocache(pci_resource_start(pdev, 1),
4526 pci_resource_len(pdev, 1));
4527 if (!qdev->reg_base) {
4528 dev_err(&pdev->dev, "Register mapping failed.\n");
4529 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004530 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004531 }
4532
4533 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4534 qdev->doorbell_area =
4535 ioremap_nocache(pci_resource_start(pdev, 3),
4536 pci_resource_len(pdev, 3));
4537 if (!qdev->doorbell_area) {
4538 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4539 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004540 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004541 }
4542
Ron Mercere4552f52009-06-09 05:39:32 +00004543 err = ql_get_board_info(qdev);
4544 if (err) {
4545 dev_err(&pdev->dev, "Register access failed.\n");
4546 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004547 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004548 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004549 qdev->msg_enable = netif_msg_init(debug, default_msg);
4550 spin_lock_init(&qdev->hw_lock);
4551 spin_lock_init(&qdev->stats_lock);
4552
Ron Mercer8aae2602010-01-15 13:31:28 +00004553 if (qlge_mpi_coredump) {
4554 qdev->mpi_coredump =
4555 vmalloc(sizeof(struct ql_mpi_coredump));
4556 if (qdev->mpi_coredump == NULL) {
4557 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4558 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004559 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004560 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004561 if (qlge_force_coredump)
4562 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004563 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004564 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004565 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004566 if (err) {
4567 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004568 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004569 }
4570
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004571 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004572 /* Keep local copy of current mac address. */
4573 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004574
4575 /* Set up the default ring sizes. */
4576 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4577 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4578
4579 /* Set up the coalescing parameters. */
4580 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4581 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4582 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4583 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4584
4585 /*
4586 * Set up the operating parameters.
4587 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004588 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4589 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4590 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4591 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004592 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004593 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004594 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004595 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004596 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004597
4598 if (!cards_found) {
4599 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4600 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4601 DRV_NAME, DRV_VERSION);
4602 }
4603 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004604err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004605 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004606err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004607 pci_disable_device(pdev);
4608 return err;
4609}
4610
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004611static const struct net_device_ops qlge_netdev_ops = {
4612 .ndo_open = qlge_open,
4613 .ndo_stop = qlge_close,
4614 .ndo_start_xmit = qlge_send,
4615 .ndo_change_mtu = qlge_change_mtu,
4616 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004617 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004618 .ndo_set_mac_address = qlge_set_mac_address,
4619 .ndo_validate_addr = eth_validate_addr,
4620 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004621 .ndo_fix_features = qlge_fix_features,
4622 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004623 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4624 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004625};
4626
Ron Mercer15c052f2010-02-04 13:32:46 -08004627static void ql_timer(unsigned long data)
4628{
4629 struct ql_adapter *qdev = (struct ql_adapter *)data;
4630 u32 var = 0;
4631
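	/* The periodic register read is what lets EEH notice a dead bus
	 * (see the timer setup in qlge_probe); if the channel is already
	 * offline, report it and stop rearming the timer.
	 */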
4632 var = ql_read32(qdev, STS);
4633 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004634 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004635 return;
4636 }
4637
Breno Leitao72046d82010-07-01 03:00:17 +00004638 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004639}
4640
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004641static int __devinit qlge_probe(struct pci_dev *pdev,
4642 const struct pci_device_id *pci_entry)
4643{
4644 struct net_device *ndev = NULL;
4645 struct ql_adapter *qdev = NULL;
4646 static int cards_found = 0;
4647 int err = 0;
4648
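	/* Allocate a multiqueue net_device with one queue per online CPU,
	 * capped at MAX_CPUS.
	 */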
Ron Mercer1e213302009-03-09 10:59:21 +00004649 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4650 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004651 if (!ndev)
4652 return -ENOMEM;
4653
4654 err = ql_init_device(pdev, ndev, cards_found);
4655 if (err < 0) {
4656 free_netdev(ndev);
4657 return err;
4658 }
4659
4660 qdev = netdev_priv(ndev);
4661 SET_NETDEV_DEV(ndev, &pdev->dev);
Michał Mirosław88230fd2011-04-18 13:31:21 +00004662 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4663 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4664 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4665 ndev->features = ndev->hw_features |
4666 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004667
4668 if (test_bit(QL_DMA64, &qdev->flags))
4669 ndev->features |= NETIF_F_HIGHDMA;
4670
4671 /*
4672 * Set up net_device structure.
4673 */
4674 ndev->tx_queue_len = qdev->tx_ring_size;
4675 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004676
4677 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004678 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004679 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004680
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004681 err = register_netdev(ndev);
4682 if (err) {
4683 dev_err(&pdev->dev, "net device registration failed.\n");
4684 ql_release_all(pdev);
4685 pci_disable_device(pdev);
4686 return err;
4687 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004688 /* Start up the timer to trigger EEH if
4689 * the bus goes dead
4690 */
4691 init_timer_deferrable(&qdev->timer);
4692 qdev->timer.data = (unsigned long)qdev;
4693 qdev->timer.function = ql_timer;
4694 qdev->timer.expires = jiffies + (5*HZ);
4695 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004696 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004697 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004698 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004699 cards_found++;
4700 return 0;
4701}
4702
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004703netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4704{
4705 return qlge_send(skb, ndev);
4706}
4707
4708int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4709{
4710 return ql_clean_inbound_rx_ring(rx_ring, budget);
4711}
4712
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004713static void __devexit qlge_remove(struct pci_dev *pdev)
4714{
4715 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004716 struct ql_adapter *qdev = netdev_priv(ndev);
4717 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004718 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004719 unregister_netdev(ndev);
4720 ql_release_all(pdev);
4721 pci_disable_device(pdev);
4722 free_netdev(ndev);
4723}
4724
Ron Mercer6d190c62009-10-28 08:39:20 +00004725/* Clean up resources without touching hardware. */
4726static void ql_eeh_close(struct net_device *ndev)
4727{
4728 int i;
4729 struct ql_adapter *qdev = netdev_priv(ndev);
4730
4731 if (netif_carrier_ok(ndev)) {
4732 netif_carrier_off(ndev);
4733 netif_stop_queue(ndev);
4734 }
4735
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004736 /* Disabling the timer */
4737 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004738 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004739
4740 for (i = 0; i < qdev->rss_ring_count; i++)
4741 netif_napi_del(&qdev->rx_ring[i].napi);
4742
4743 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4744 ql_tx_ring_clean(qdev);
4745 ql_free_rx_buffers(qdev);
4746 ql_release_adapter_resources(qdev);
4747}
4748
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004749/*
4750 * This callback is called by the PCI subsystem whenever
4751 * a PCI bus error is detected.
4752 */
4753static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4754 enum pci_channel_state state)
4755{
4756 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004757 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004758
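	/* A frozen channel gets a software-only teardown and a slot reset
	 * request; a permanent failure is marked fatal and disconnected.
	 */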
Ron Mercer6d190c62009-10-28 08:39:20 +00004759 switch (state) {
4760 case pci_channel_io_normal:
4761 return PCI_ERS_RESULT_CAN_RECOVER;
4762 case pci_channel_io_frozen:
4763 netif_device_detach(ndev);
4764 if (netif_running(ndev))
4765 ql_eeh_close(ndev);
4766 pci_disable_device(pdev);
4767 return PCI_ERS_RESULT_NEED_RESET;
4768 case pci_channel_io_perm_failure:
4769 dev_err(&pdev->dev,
4770 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004771 ql_eeh_close(ndev);
4772 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004773 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004774 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004775
4776 /* Request a slot reset. */
4777 return PCI_ERS_RESULT_NEED_RESET;
4778}
4779
4780/*
 4781 * This callback is called after the PCI bus has been reset.
4782 * Basically, this tries to restart the card from scratch.
4783 * This is a shortened version of the device probe/discovery code,
 4784 * it resembles the first half of the qlge_probe() routine.
4785 */
4786static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4787{
4788 struct net_device *ndev = pci_get_drvdata(pdev);
4789 struct ql_adapter *qdev = netdev_priv(ndev);
4790
Ron Mercer6d190c62009-10-28 08:39:20 +00004791 pdev->error_state = pci_channel_io_normal;
4792
4793 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004794 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004795 netif_err(qdev, ifup, qdev->ndev,
4796 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004797 return PCI_ERS_RESULT_DISCONNECT;
4798 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004799 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004800
4801 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004802 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004803 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004804 return PCI_ERS_RESULT_DISCONNECT;
4805 }
4806
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004807 return PCI_ERS_RESULT_RECOVERED;
4808}
4809
4810static void qlge_io_resume(struct pci_dev *pdev)
4811{
4812 struct net_device *ndev = pci_get_drvdata(pdev);
4813 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004814 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004815
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004816 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004817 err = qlge_open(ndev);
4818 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004819 netif_err(qdev, ifup, qdev->ndev,
4820 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004821 return;
4822 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004823 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004824 netif_err(qdev, ifup, qdev->ndev,
4825 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004826 }
Breno Leitao72046d82010-07-01 03:00:17 +00004827 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004828 netif_device_attach(ndev);
4829}
4830
4831static struct pci_error_handlers qlge_err_handler = {
4832 .error_detected = qlge_io_error_detected,
4833 .slot_reset = qlge_io_slot_reset,
4834 .resume = qlge_io_resume,
4835};
4836
4837static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4838{
4839 struct net_device *ndev = pci_get_drvdata(pdev);
4840 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004841 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004842
4843 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004844 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004845
4846 if (netif_running(ndev)) {
4847 err = ql_adapter_down(qdev);
 4848 		if (err)
4849 return err;
4850 }
4851
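	/* Arm wake-on-LAN (as configured) before saving state and powering
	 * the device down.
	 */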
Ron Mercerbc083ce2009-10-21 11:07:40 +00004852 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004853 err = pci_save_state(pdev);
4854 if (err)
4855 return err;
4856
4857 pci_disable_device(pdev);
4858
4859 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4860
4861 return 0;
4862}
4863
David S. Miller04da2cf2008-09-19 16:14:24 -07004864#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004865static int qlge_resume(struct pci_dev *pdev)
4866{
4867 struct net_device *ndev = pci_get_drvdata(pdev);
4868 struct ql_adapter *qdev = netdev_priv(ndev);
4869 int err;
4870
4871 pci_set_power_state(pdev, PCI_D0);
4872 pci_restore_state(pdev);
4873 err = pci_enable_device(pdev);
4874 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004875 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004876 return err;
4877 }
4878 pci_set_master(pdev);
4879
4880 pci_enable_wake(pdev, PCI_D3hot, 0);
4881 pci_enable_wake(pdev, PCI_D3cold, 0);
4882
4883 if (netif_running(ndev)) {
4884 err = ql_adapter_up(qdev);
4885 if (err)
4886 return err;
4887 }
4888
Breno Leitao72046d82010-07-01 03:00:17 +00004889 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004890 netif_device_attach(ndev);
4891
4892 return 0;
4893}
David S. Miller04da2cf2008-09-19 16:14:24 -07004894#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004895
4896static void qlge_shutdown(struct pci_dev *pdev)
4897{
4898 qlge_suspend(pdev, PMSG_SUSPEND);
4899}
4900
4901static struct pci_driver qlge_driver = {
4902 .name = DRV_NAME,
4903 .id_table = qlge_pci_tbl,
4904 .probe = qlge_probe,
4905 .remove = __devexit_p(qlge_remove),
4906#ifdef CONFIG_PM
4907 .suspend = qlge_suspend,
4908 .resume = qlge_resume,
4909#endif
4910 .shutdown = qlge_shutdown,
4911 .err_handler = &qlge_err_handler
4912};
4913
4914static int __init qlge_init_module(void)
4915{
4916 return pci_register_driver(&qlge_driver);
4917}
4918
4919static void __exit qlge_exit(void)
4920{
4921 pci_unregister_driver(&qlge_driver);
4922}
4923
4924module_init(qlge_init_module);
4925module_exit(qlge_exit);