/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
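
/* ql_sem_trylock() returns 0 on success: the write above requests the
 * semaphore by setting its SEM_SET bits, and the read-back tells us
 * whether the hardware actually granted it to this function.
 */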

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
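
/* Worst case ql_sem_spinlock() busy-waits for about 3 ms
 * (30 tries x 100 us) before giving up with -ETIMEDOUT.
 */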

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
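
/* The polling loops above and below wait at most UDELAY_COUNT
 * iterations of UDELAY_DELAY microseconds each; both constants are
 * assumed to be defined in the driver header (qlge.h).
 */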

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
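
/* A CAM MAC entry is read back as three 32-bit words (address lower,
 * address upper, then the routing/output word); a multicast entry
 * only has the first two.
 */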

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
492
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400493/* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497{
498 int status = 0;
499
Ron Mercer939678f2009-01-04 17:08:29 -0800500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
Ron Mercer939678f2009-01-04 17:08:29 -0800506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400511 return status;
512}
513
514/* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521{
Ron Mercer8587ea32009-02-23 10:42:15 +0000522 int status = -EINVAL; /* Return error if no mask match. */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400523 u32 value = 0;
524
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400525 switch (mask) {
526 case RT_IDX_CAM_HIT:
527 {
528 value = RT_IDX_DST_CAM_Q | /* dest */
529 RT_IDX_TYPE_NICQ | /* type */
530 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 break;
532 }
533 case RT_IDX_VALID: /* Promiscuous Mode frames. */
534 {
535 value = RT_IDX_DST_DFLT_Q | /* dest */
536 RT_IDX_TYPE_NICQ | /* type */
537 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 break;
539 }
540 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
541 {
542 value = RT_IDX_DST_DFLT_Q | /* dest */
543 RT_IDX_TYPE_NICQ | /* type */
544 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 break;
546 }
Ron Mercerfbc2ac32010-07-05 12:19:41 +0000547 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548 {
549 value = RT_IDX_DST_DFLT_Q | /* dest */
550 RT_IDX_TYPE_NICQ | /* type */
551 (RT_IDX_IP_CSUM_ERR_SLOT <<
552 RT_IDX_IDX_SHIFT); /* index */
553 break;
554 }
555 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 RT_IDX_IDX_SHIFT); /* index */
561 break;
562 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400563 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
564 {
565 value = RT_IDX_DST_DFLT_Q | /* dest */
566 RT_IDX_TYPE_NICQ | /* type */
567 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 break;
569 }
570 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
571 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000572 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400573 RT_IDX_TYPE_NICQ | /* type */
574 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 break;
576 }
577 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
578 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000579 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400580 RT_IDX_TYPE_NICQ | /* type */
581 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 break;
583 }
584 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
585 {
586 value = RT_IDX_DST_RSS | /* dest */
587 RT_IDX_TYPE_NICQ | /* type */
588 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589 break;
590 }
591 case 0: /* Clear the E-bit on an entry. */
592 {
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (index << RT_IDX_IDX_SHIFT);/* index */
596 break;
597 }
598 default:
Joe Perchesae9540f72010-02-09 11:49:52 +0000599 netif_err(qdev, ifup, qdev->ndev,
600 "Mask type %d not yet supported.\n", mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400601 status = -EPERM;
602 goto exit;
603 }
604
605 if (value) {
606 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607 if (status)
608 goto exit;
609 value |= (enable ? RT_IDX_E : 0);
610 ql_write32(qdev, RT_IDX, value);
611 ql_write32(qdev, RT_DATA, enable ? mask : 0);
612 }
613exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400614 return status;
615}
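
/* Note that even the "clear the E-bit" (mask == 0) case produces a
 * nonzero value, so the write above still happens: the entry is
 * rewritten with RT_DATA set to 0 (callers presumably pass
 * enable == 0 there, leaving RT_IDX_E clear as well).
 */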

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
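
/* A valid flash image's 16-bit words (including the stored checksum
 * word) sum to zero mod 2^16, so a nonzero csum doubles as the error
 * return value here.
 */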

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
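
/* Note the low and high halves come from two separate register reads,
 * so a counter that rolls over between the two reads could be sampled
 * inconsistently.
 */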

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
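
/* The "block" here is the master page that large receive buffers are
 * carved from: one compound allocation of order lbq_buf_order, handed
 * out in lbq_buf_size chunks by ql_get_next_chunk() below.
 */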

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
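
/* Every chunk handed out holds its own reference on the master page
 * (note the get_page() above for all but the last chunk), so the page
 * survives until all of its chunks have been consumed and released.
 */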
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i = %d, clean_idx = %d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
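
/* Large buffers are replenished in bursts of 16: the producer index
 * only advances in multiples of 16, and refilling continues while
 * more than 32 descriptors are waiting to be refilled.
 */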

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
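		/* fall through - counted above, nothing else to do */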
	default:
		break;
	}
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}
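
/* In these receive paths a vlan_id of 0xffff appears to be the
 * sentinel for "no VLAN tag present"; any other value is handed to
 * the VLAN acceleration layer via __vlan_hwaccel_put_tag().
 */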
1504
1505/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001506static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1507 struct rx_ring *rx_ring,
1508 struct ib_mac_iocb_rsp *ib_mac_rsp,
1509 u32 length,
1510 u16 vlan_id)
1511{
1512 struct net_device *ndev = qdev->ndev;
1513 struct sk_buff *skb = NULL;
1514 void *addr;
1515 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1516 struct napi_struct *napi = &rx_ring->napi;
1517
1518 skb = netdev_alloc_skb(ndev, length);
1519 if (!skb) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001520 rx_ring->rx_dropped++;
1521 put_page(lbq_desc->p.pg_chunk.page);
1522 return;
1523 }
1524
1525 addr = lbq_desc->p.pg_chunk.va;
1526 prefetch(addr);
1527
Ron Mercer4f848c02010-01-02 10:37:43 +00001528 /* The max framesize filter on this chip is set higher than
1529 * MTU since FCoE uses 2k frames.
1530 */
1531	if (length > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001532		netif_err(qdev, drv, qdev->ndev,
1533			  "Frame too long, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001534 rx_ring->rx_dropped++;
1535 goto err_out;
1536 }
1537 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001538 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1539 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1540 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001541 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1542 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1543 length-ETH_HLEN);
1544 skb->len += length-ETH_HLEN;
1545 skb->data_len += length-ETH_HLEN;
1546 skb->truesize += length-ETH_HLEN;
1547
1548 rx_ring->rx_packets++;
1549 rx_ring->rx_bytes += skb->len;
1550 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001551 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001552
Michał Mirosław88230fd2011-04-18 13:31:21 +00001553 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001554 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1555 /* TCP frame. */
1556 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001557 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1558 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001559 skb->ip_summed = CHECKSUM_UNNECESSARY;
1560 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1561 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1562 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001563 struct iphdr *iph =
1564 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
Ron Mercer4f848c02010-01-02 10:37:43 +00001565 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001566 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001567 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001568 netif_printk(qdev, rx_status, KERN_DEBUG,
1569 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001570 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001571 }
1572 }
1573 }
1574
1575 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001576 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001577 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001578 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1579 napi_gro_receive(napi, skb);
1580 else
1581 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001582 return;
1583err_out:
1584 dev_kfree_skb_any(skb);
1585 put_page(lbq_desc->p.pg_chunk.page);
1586}
1587
1588/* Process an inbound completion from an rx ring. */
1589static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1590 struct rx_ring *rx_ring,
1591 struct ib_mac_iocb_rsp *ib_mac_rsp,
1592 u32 length,
1593 u16 vlan_id)
1594{
1595 struct net_device *ndev = qdev->ndev;
1596 struct sk_buff *skb = NULL;
1597 struct sk_buff *new_skb = NULL;
1598 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1599
1600 skb = sbq_desc->p.skb;
1601 /* Allocate new_skb and copy */
1602 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1603 if (new_skb == NULL) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001604 rx_ring->rx_dropped++;
1605 return;
1606 }
1607 skb_reserve(new_skb, NET_IP_ALIGN);
1608 memcpy(skb_put(new_skb, length), skb->data, length);
1609 skb = new_skb;
1610
Ron Mercer4f848c02010-01-02 10:37:43 +00001611 /* loopback self test for ethtool */
1612 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1613 ql_check_lb_frame(qdev, skb);
1614 dev_kfree_skb_any(skb);
1615 return;
1616 }
1617
1618 /* The max framesize filter on this chip is set higher than
1619 * MTU since FCoE uses 2k frames.
1620 */
1621 if (skb->len > ndev->mtu + ETH_HLEN) {
1622 dev_kfree_skb_any(skb);
1623 rx_ring->rx_dropped++;
1624 return;
1625 }
1626
1627 prefetch(skb->data);
Ron Mercer4f848c02010-01-02 10:37:43 +00001628 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001629 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1630 "%s Multicast.\n",
1631 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1632 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1633 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1634 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1635 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1636 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001637 }
1638 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001639 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1640 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001641
1642 rx_ring->rx_packets++;
1643 rx_ring->rx_bytes += skb->len;
1644 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001645 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001646
1647 /* If rx checksum is on, and there are no
1648 * csum or frame errors.
1649 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001650 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001651 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1652 /* TCP frame. */
1653 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001654 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1655 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001656 skb->ip_summed = CHECKSUM_UNNECESSARY;
1657 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1658 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1659 /* Unfragmented ipv4 UDP frame. */
1660 struct iphdr *iph = (struct iphdr *) skb->data;
1661 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001662 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001663 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001664 netif_printk(qdev, rx_status, KERN_DEBUG,
1665 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001666 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001667 }
1668 }
1669 }
1670
1671 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001672 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001673 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001674 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1675 napi_gro_receive(&rx_ring->napi, skb);
1676 else
1677 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001678}
1679
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001680static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001681{
1682 void *temp_addr = skb->data;
1683
1684 /* Undo the skb_reserve(skb,32) we did before
1685	 * giving it to the hardware, and realign the data on
1686 * a 2-byte boundary.
1687 */
1688 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1689 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1690 skb_copy_to_linear_data(skb, temp_addr,
1691 (unsigned int)len);
1692}
1693
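/*
 * Standalone illustration (not driver code): the shift ql_realign_skb()
 * does above.  Receive data was placed after a 32-byte pad for the
 * hardware; moving it back to a 2-byte offset re-establishes IP header
 * alignment.  PAD and IP_ALIGN stand in for QLGE_SB_PAD and NET_IP_ALIGN.
 */
#include <stdio.h>
#include <string.h>

#define PAD		32
#define IP_ALIGN	2

static void demo_realign(unsigned char *buf, size_t len)
{
	/* Data currently starts at buf + PAD; slide it back by PAD - IP_ALIGN. */
	memmove(buf + IP_ALIGN, buf + PAD, len);
}

int main(void)
{
	unsigned char buf[64] = { 0 };

	buf[PAD] = 0x45;		/* fake first byte of an IPv4 header */
	demo_realign(buf, 1);
	printf("first byte now at offset %d: 0x%02x\n", IP_ALIGN, buf[IP_ALIGN]);
	return 0;
}
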
1694/*
1695 * This function builds an skb for the given inbound
1696 * completion. It will be rewritten for readability in the near
1697 * future, but for now it works well.
1698 */
1699static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1700 struct rx_ring *rx_ring,
1701 struct ib_mac_iocb_rsp *ib_mac_rsp)
1702{
1703 struct bq_desc *lbq_desc;
1704 struct bq_desc *sbq_desc;
1705 struct sk_buff *skb = NULL;
1706 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1707 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1708
1709 /*
1710 * Handle the header buffer if present.
1711 */
1712 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1713 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001714 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001716 /*
1717 * Headers fit nicely into a small buffer.
1718 */
1719 sbq_desc = ql_get_curr_sbuf(rx_ring);
1720 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001721 dma_unmap_addr(sbq_desc, mapaddr),
1722 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001723 PCI_DMA_FROMDEVICE);
1724 skb = sbq_desc->p.skb;
1725 ql_realign_skb(skb, hdr_len);
1726 skb_put(skb, hdr_len);
1727 sbq_desc->p.skb = NULL;
1728 }
1729
1730 /*
1731 * Handle the data buffer(s).
1732 */
1733 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001734 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1735 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001736 return skb;
1737 }
1738
1739 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1740 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001741 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 "Headers in small, data of %d bytes in small, combine them.\n",
1743 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001744 /*
1745 * Data is less than small buffer size so it's
1746 * stuffed in a small buffer.
1747 * For this case we append the data
1748 * from the "data" small buffer to the "header" small
1749 * buffer.
1750 */
1751 sbq_desc = ql_get_curr_sbuf(rx_ring);
1752 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001753 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001754 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001755 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001756 (sbq_desc, maplen),
1757 PCI_DMA_FROMDEVICE);
1758 memcpy(skb_put(skb, length),
1759 sbq_desc->p.skb->data, length);
1760 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001761 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001762 (sbq_desc,
1763 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001764 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001765 (sbq_desc,
1766 maplen),
1767 PCI_DMA_FROMDEVICE);
1768 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001769 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1770 "%d bytes in a single small buffer.\n",
1771 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001772 sbq_desc = ql_get_curr_sbuf(rx_ring);
1773 skb = sbq_desc->p.skb;
1774 ql_realign_skb(skb, length);
1775 skb_put(skb, length);
1776 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001777 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001778 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001779 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001780 maplen),
1781 PCI_DMA_FROMDEVICE);
1782 sbq_desc->p.skb = NULL;
1783 }
1784 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1785 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001786 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1787 "Header in small, %d bytes in large. Chain large to small!\n",
1788 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001789 /*
1790 * The data is in a single large buffer. We
1791 * chain it to the header buffer's skb and let
1792 * it rip.
1793 */
Ron Mercer7c734352009-10-19 03:32:19 +00001794 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001795 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1796 "Chaining page at offset = %d, for %d bytes to skb.\n",
1797 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001798 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1799 lbq_desc->p.pg_chunk.offset,
1800 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001801 skb->len += length;
1802 skb->data_len += length;
1803 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001804 } else {
1805 /*
1806 * The headers and data are in a single large buffer. We
1807 * copy it to a new skb and let it go. This can happen with
1808 * jumbo mtu on a non-TCP/UDP frame.
1809 */
Ron Mercer7c734352009-10-19 03:32:19 +00001810 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001811 skb = netdev_alloc_skb(qdev->ndev, length);
1812 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001813 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1814 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001815 return NULL;
1816 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001817 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001818 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001819 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001820 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001821 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001822 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001823 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1824 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1825 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001826 skb_fill_page_desc(skb, 0,
1827 lbq_desc->p.pg_chunk.page,
1828 lbq_desc->p.pg_chunk.offset,
1829 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001830 skb->len += length;
1831 skb->data_len += length;
1832 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001834 __pskb_pull_tail(skb,
1835 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1836 VLAN_ETH_HLEN : ETH_HLEN);
1837 }
1838 } else {
1839 /*
1840 * The data is in a chain of large buffers
1841 * pointed to by a small buffer. We loop
1842	 * through and chain them to our small header
1843 * buffer's skb.
1844 * frags: There are 18 max frags and our small
1845 * buffer will hold 32 of them. The thing is,
1846 * we'll use 3 max for our 9000 byte jumbo
1847 * frames. If the MTU goes up we could
1848 * eventually be in trouble.
1849 */
Ron Mercer7c734352009-10-19 03:32:19 +00001850 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001851 sbq_desc = ql_get_curr_sbuf(rx_ring);
1852 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001853 dma_unmap_addr(sbq_desc, mapaddr),
1854 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001855 PCI_DMA_FROMDEVICE);
1856 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1857 /*
1858			 * This is a non-TCP/UDP IP frame, so
1859 * the headers aren't split into a small
1860 * buffer. We have to use the small buffer
1861 * that contains our sg list as our skb to
1862 * send upstairs. Copy the sg list here to
1863 * a local buffer and use it to find the
1864 * pages to chain.
1865 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001866 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867 "%d bytes of headers & data in chain of large.\n",
1868 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001869 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001870 sbq_desc->p.skb = NULL;
1871 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001872 }
1873 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001874 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1875 size = (length < rx_ring->lbq_buf_size) ? length :
1876 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001877
Joe Perchesae9540f72010-02-09 11:49:52 +00001878 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1879 "Adding page %d to skb for %d bytes.\n",
1880 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001881 skb_fill_page_desc(skb, i,
1882 lbq_desc->p.pg_chunk.page,
1883 lbq_desc->p.pg_chunk.offset,
1884 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001885 skb->len += size;
1886 skb->data_len += size;
1887 skb->truesize += size;
1888 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001889 i++;
1890 }
1891 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1892 VLAN_ETH_HLEN : ETH_HLEN);
1893 }
1894 return skb;
1895}
1896
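/*
 * Standalone illustration (not driver code): the chaining loop at the end
 * of ql_build_rx_skb() above.  A frame is consumed in chunks of at most
 * the large-buffer size, one frag per chunk, which is why the comment in
 * that function expects three frags for a 9000-byte jumbo frame.  CHUNK
 * stands in for rx_ring->lbq_buf_size.
 */
#include <stdio.h>

#define CHUNK 4096

int main(void)
{
	unsigned int length = 9000;	/* one jumbo frame */
	int i = 0;

	while (length > 0) {
		unsigned int size = length < CHUNK ? length : CHUNK;

		/* models one skb_fill_page_desc(skb, i, page, offset, size) */
		printf("frag %d: %u bytes\n", i, size);
		length -= size;
		i++;
	}
	printf("%d frags used\n", i);
	return 0;
}
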
1897/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001898static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001899 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001900 struct ib_mac_iocb_rsp *ib_mac_rsp,
1901 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001902{
1903 struct net_device *ndev = qdev->ndev;
1904 struct sk_buff *skb = NULL;
1905
1906 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1907
1908 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1909 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001910 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1911 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001912 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001913 return;
1914 }
1915
Ron Mercerec33a492009-06-09 05:39:28 +00001916 /* The max framesize filter on this chip is set higher than
1917 * MTU since FCoE uses 2k frames.
1918 */
1919 if (skb->len > ndev->mtu + ETH_HLEN) {
1920 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001921 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001922 return;
1923 }
1924
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001925 /* loopback self test for ethtool */
1926 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1927 ql_check_lb_frame(qdev, skb);
1928 dev_kfree_skb_any(skb);
1929 return;
1930 }
1931
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001932 prefetch(skb->data);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001933 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001934 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1935 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1936 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1937 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1938 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1939 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1940 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001941 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001942 }
1943 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001944 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1945 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001946 }
Ron Mercerd555f592009-03-09 10:59:19 +00001947
Ron Mercerd555f592009-03-09 10:59:19 +00001948 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001949 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001950
1951 /* If rx checksum is on, and there are no
1952 * csum or frame errors.
1953 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001954 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001955 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1956 /* TCP frame. */
1957 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001958 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1959 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001960 skb->ip_summed = CHECKSUM_UNNECESSARY;
1961 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1962 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1963 /* Unfragmented ipv4 UDP frame. */
1964 struct iphdr *iph = (struct iphdr *) skb->data;
1965 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001966 htons(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00001967 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001968 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1969					   "UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001970 }
1971 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001972 }
Ron Mercerd555f592009-03-09 10:59:19 +00001973
Ron Mercer885ee392009-11-03 13:49:31 +00001974 rx_ring->rx_packets++;
1975 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001976 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001977 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001978 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001979 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1980 napi_gro_receive(&rx_ring->napi, skb);
1981 else
1982 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001983}
1984
Ron Mercer4f848c02010-01-02 10:37:43 +00001985/* Process an inbound completion from an rx ring. */
1986static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1987 struct rx_ring *rx_ring,
1988 struct ib_mac_iocb_rsp *ib_mac_rsp)
1989{
1990 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1991 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1992 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1993 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1994
1995 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1996
Jitendra Kalsaria433c88e2012-07-10 14:57:37 +00001997 /* Frame error, so drop the packet. */
1998 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1999 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2000 return (unsigned long)length;
2001 }
2002
Ron Mercer4f848c02010-01-02 10:37:43 +00002003 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2004 /* The data and headers are split into
2005 * separate buffers.
2006 */
2007 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2008 vlan_id);
2009 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2010 /* The data fit in a single small buffer.
2011 * Allocate a new skb, copy the data and
2012 * return the buffer to the free pool.
2013 */
2014 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2015 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002016 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2017 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2018 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2019 /* TCP packet in a page chunk that's been checksummed.
2020 * Tack it on to our GRO skb and let it go.
2021 */
2022 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2023 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002024 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2025 /* Non-TCP packet in a page chunk. Allocate an
2026 * skb, tack it on frags, and send it up.
2027 */
2028 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2029 length, vlan_id);
2030 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002031 /* Non-TCP/UDP large frames that span multiple buffers
2032		 * can be processed correctly by the split frame logic.
2033 */
2034 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2035 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002036 }
2037
2038 return (unsigned long)length;
2039}
2040
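/*
 * Standalone illustration (not driver code): how ql_process_mac_rx_intr()
 * above derives the VLAN id.  The field is only meaningful when the V flag
 * is set; otherwise 0xffff serves as a "no VLAN" sentinel that the rx
 * handlers test before calling __vlan_hwaccel_put_tag().  FLAG_V and
 * VLAN_MASK are hypothetical stand-ins for IB_MAC_IOCB_RSP_V and
 * IB_MAC_IOCB_RSP_VLAN_MASK.
 */
#include <stdio.h>
#include <stdint.h>

#define FLAG_V		0x20
#define VLAN_MASK	0x0fff
#define NO_VLAN		0xffff

static uint16_t demo_vlan_id(uint8_t flags2, uint16_t vlan_field)
{
	return (flags2 & FLAG_V) ? (vlan_field & VLAN_MASK) : NO_VLAN;
}

int main(void)
{
	printf("tagged:   %#x\n", demo_vlan_id(FLAG_V, 0x2064));	/* 0x64 */
	printf("untagged: %#x\n", demo_vlan_id(0, 0x2064));		/* 0xffff */
	return 0;
}
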
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002041/* Process an outbound completion from an rx ring. */
2042static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2043 struct ob_mac_iocb_rsp *mac_rsp)
2044{
2045 struct tx_ring *tx_ring;
2046 struct tx_ring_desc *tx_ring_desc;
2047
2048 QL_DUMP_OB_MAC_RSP(mac_rsp);
2049 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2050 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2051 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002052 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2053 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002054 dev_kfree_skb(tx_ring_desc->skb);
2055 tx_ring_desc->skb = NULL;
2056
2057 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2058 OB_MAC_IOCB_RSP_S |
2059 OB_MAC_IOCB_RSP_L |
2060 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2061 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002062 netif_warn(qdev, tx_done, qdev->ndev,
2063 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002064 }
2065 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002066 netif_warn(qdev, tx_done, qdev->ndev,
2067 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002068 }
2069 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002070 netif_warn(qdev, tx_done, qdev->ndev,
2071 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002072 }
2073 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002074 netif_warn(qdev, tx_done, qdev->ndev,
2075 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002076 }
2077 }
2078 atomic_inc(&tx_ring->tx_count);
2079}
2080
2081/* Fire up a handler to reset the MPI processor. */
2082void ql_queue_fw_error(struct ql_adapter *qdev)
2083{
Ron Mercer6a473302009-07-02 06:06:12 +00002084 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002085 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2086}
2087
2088void ql_queue_asic_error(struct ql_adapter *qdev)
2089{
Ron Mercer6a473302009-07-02 06:06:12 +00002090 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002091 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002092 /* Clear adapter up bit to signal the recovery
2093 * process that it shouldn't kill the reset worker
2094 * thread
2095 */
2096 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002097	/* Set the asic recovery bit to indicate to the reset process
2098	 * that we are in fatal error recovery rather than a normal close
2099 */
2100 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002101 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2102}
2103
2104static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2105 struct ib_ae_iocb_rsp *ib_ae_rsp)
2106{
2107 switch (ib_ae_rsp->event) {
2108 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002109 netif_err(qdev, rx_err, qdev->ndev,
2110 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002111 ql_queue_fw_error(qdev);
2112 return;
2113
2114 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002115		netdev_err(qdev->ndev, "Multiple CAM hits occurred during lookup.\n");
2116 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002117 ql_queue_asic_error(qdev);
2118 return;
2119
2120 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002121 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002122 ql_queue_asic_error(qdev);
2123 break;
2124
2125 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002126 netdev_err(qdev->ndev, "PCI error occurred when reading "
2127 "anonymous buffers from rx_ring %d.\n",
2128 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002129 ql_queue_asic_error(qdev);
2130 break;
2131
2132 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002133 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2134 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002135 ql_queue_asic_error(qdev);
2136 break;
2137 }
2138}
2139
2140static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2141{
2142 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002143 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002144 struct ob_mac_iocb_rsp *net_rsp = NULL;
2145 int count = 0;
2146
Ron Mercer1e213302009-03-09 10:59:21 +00002147 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002148 /* While there are entries in the completion queue. */
2149 while (prod != rx_ring->cnsmr_idx) {
2150
Joe Perchesae9540f72010-02-09 11:49:52 +00002151 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2152			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2153 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002154
2155 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2156 rmb();
2157 switch (net_rsp->opcode) {
2158
2159 case OPCODE_OB_MAC_TSO_IOCB:
2160 case OPCODE_OB_MAC_IOCB:
2161 ql_process_mac_tx_intr(qdev, net_rsp);
2162 break;
2163 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002164 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2165 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2166 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002167 }
2168 count++;
2169 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002170 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002171 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002172 if (!net_rsp)
2173 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002174 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002175 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002176 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Jitendra Kalsariad0de7302012-07-10 14:57:32 +00002177 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002178 /*
2179 * The queue got stopped because the tx_ring was full.
2180 * Wake it up, because it's now at least 25% empty.
2181 */
Ron Mercer1e213302009-03-09 10:59:21 +00002182 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002183 }
2184
2185 return count;
2186}
2187
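/*
 * Standalone illustration (not driver code): the restart heuristic at the
 * end of ql_clean_outbound_rx_ring() above.  A stopped tx queue is only
 * woken once more than a quarter of its descriptors are free, which keeps
 * the queue from bouncing between stopped and awake when the ring hovers
 * near full.
 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_should_wake(bool stopped, int free_count, int wq_len)
{
	return stopped && free_count > wq_len / 4;
}

int main(void)
{
	printf("%d\n", demo_should_wake(true, 30, 256));	/* 0: still too full */
	printf("%d\n", demo_should_wake(true, 70, 256));	/* 1: wake the queue */
	return 0;
}
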
2188static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2189{
2190 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002191 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002192 struct ql_net_rsp_iocb *net_rsp;
2193 int count = 0;
2194
2195 /* While there are entries in the completion queue. */
2196 while (prod != rx_ring->cnsmr_idx) {
2197
Joe Perchesae9540f72010-02-09 11:49:52 +00002198 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2199			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2200 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002201
2202 net_rsp = rx_ring->curr_entry;
2203 rmb();
2204 switch (net_rsp->opcode) {
2205 case OPCODE_IB_MAC_IOCB:
2206 ql_process_mac_rx_intr(qdev, rx_ring,
2207 (struct ib_mac_iocb_rsp *)
2208 net_rsp);
2209 break;
2210
2211 case OPCODE_IB_AE_IOCB:
2212 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2213 net_rsp);
2214 break;
2215 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002216 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2217 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2218 net_rsp->opcode);
2219 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002220 }
2221 count++;
2222 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002224 if (count == budget)
2225 break;
2226 }
2227 ql_update_buffer_queues(qdev, rx_ring);
2228 ql_write_cq_idx(rx_ring);
2229 return count;
2230}
2231
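/*
 * Standalone illustration (not driver code): the budgeted walk in
 * ql_clean_inbound_rx_ring() above.  The consumer index chases the
 * producer index one completion at a time and bails out when the NAPI
 * budget is spent, so one busy ring cannot monopolize the CPU.
 */
#include <stdio.h>

#define Q_LEN 16

int main(void)
{
	unsigned int prod = 11, cnsmr = 2;	/* sample indices */
	int budget = 4, count = 0;

	while (prod != cnsmr) {
		/* ... dispatch the entry at cnsmr by opcode ... */
		cnsmr = (cnsmr + 1) % Q_LEN;	/* like ql_update_cq() */
		if (++count == budget)
			break;
	}
	printf("handled %d entries, consumer now at %u\n", count, cnsmr);
	return 0;
}
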
2232static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2233{
2234 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2235 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002236 struct rx_ring *trx_ring;
2237 int i, work_done = 0;
2238 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002239
Joe Perchesae9540f72010-02-09 11:49:52 +00002240 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2241 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002242
Ron Mercer39aa8162009-08-27 11:02:11 +00002243 /* Service the TX rings first. They start
2244 * right after the RSS rings. */
2245 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2246 trx_ring = &qdev->rx_ring[i];
2247 /* If this TX completion ring belongs to this vector and
2248 * it's not empty then service it.
2249 */
2250 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2251 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2252 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002253 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2254 "%s: Servicing TX completion ring %d.\n",
2255 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002256 ql_clean_outbound_rx_ring(trx_ring);
2257 }
2258 }
2259
2260 /*
2261 * Now service the RSS ring if it's active.
2262 */
2263 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2264 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002265 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2266 "%s: Servicing RX completion ring %d.\n",
2267 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002268 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2269 }
2270
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002271 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002272 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002273 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2274 }
2275 return work_done;
2276}
2277
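/*
 * Standalone illustration (not driver code): the vector-to-ring matching
 * in ql_napi_poll_msix() above.  Each interrupt context carries a bitmask
 * of the completion queues it owns, and a ring is serviced only when its
 * cq_id bit is set in that mask.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t irq_mask = (1u << 3) | (1u << 5);	/* this vector owns cq 3 and 5 */
	unsigned int cq_id;

	for (cq_id = 0; cq_id < 8; cq_id++)
		if (irq_mask & (1u << cq_id))
			printf("vector services cq %u\n", cq_id);
	return 0;
}
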
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002278static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002279{
2280 struct ql_adapter *qdev = netdev_priv(ndev);
2281
Patrick McHardyf6469682013-04-19 02:04:27 +00002282 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002283 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002284 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002285 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002286 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2287 }
2288}
2289
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002290static netdev_features_t qlge_fix_features(struct net_device *ndev,
2291 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002292{
2293 /*
2294 * Since there is no support for separate rx/tx vlan accel
2295	 * enable/disable, make sure the tx flag is always in the same state as rx.
2296 */
Patrick McHardyf6469682013-04-19 02:04:27 +00002297 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2298 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002299 else
Patrick McHardyf6469682013-04-19 02:04:27 +00002300 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002301
2302 return features;
2303}
2304
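/*
 * Standalone illustration (not driver code): the rx/tx coupling enforced
 * by qlge_fix_features() above.  Because the device has no separate rx/tx
 * VLAN acceleration control, the tx flag is forced to track the rx flag.
 * The bit values are arbitrary stand-ins for NETIF_F_HW_VLAN_CTAG_RX/TX.
 */
#include <stdio.h>
#include <stdint.h>

#define F_VLAN_RX	(1u << 0)
#define F_VLAN_TX	(1u << 1)

static uint32_t demo_fix_features(uint32_t features)
{
	if (features & F_VLAN_RX)
		features |= F_VLAN_TX;
	else
		features &= ~F_VLAN_TX;
	return features;
}

int main(void)
{
	printf("%#x\n", demo_fix_features(F_VLAN_RX));	/* both bits set */
	printf("%#x\n", demo_fix_features(F_VLAN_TX));	/* both bits clear */
	return 0;
}
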
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002305static int qlge_set_features(struct net_device *ndev,
2306 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002307{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002308 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002309
Patrick McHardyf6469682013-04-19 02:04:27 +00002310 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002311 qlge_vlan_mode(ndev, features);
2312
2313 return 0;
2314}
2315
Jiri Pirko8e586132011-12-08 19:52:37 -05002316static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002317{
2318 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002319 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002320
Jiri Pirko8e586132011-12-08 19:52:37 -05002321 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2322 MAC_ADDR_TYPE_VLAN, vid);
2323 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002324 netif_err(qdev, ifup, qdev->ndev,
2325 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002326 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002327}
2328
Patrick McHardy80d5c362013-04-19 02:04:28 +00002329static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002330{
2331 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002332 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002333 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002334
Ron Mercercc288f52009-02-23 10:42:14 +00002335 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2336 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002337 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002338
Jiri Pirko8e586132011-12-08 19:52:37 -05002339 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002340 set_bit(vid, qdev->active_vlans);
2341
Ron Mercercc288f52009-02-23 10:42:14 +00002342 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002343
2344 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002345}
2346
Jiri Pirko8e586132011-12-08 19:52:37 -05002347static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002348{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002349 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002350 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002351
Jiri Pirko8e586132011-12-08 19:52:37 -05002352 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2353 MAC_ADDR_TYPE_VLAN, vid);
2354 if (err)
Joe Perchesae9540f72010-02-09 11:49:52 +00002355 netif_err(qdev, ifup, qdev->ndev,
2356 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002357 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002358}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002359
Patrick McHardy80d5c362013-04-19 02:04:28 +00002360static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002361{
2362 struct ql_adapter *qdev = netdev_priv(ndev);
2363 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002364 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002365
2366 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002368 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002369
Jiri Pirko8e586132011-12-08 19:52:37 -05002370 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002371 clear_bit(vid, qdev->active_vlans);
2372
2373 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002374
2375 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002376}
2377
Ron Mercerc1b60092010-10-27 04:58:12 +00002378static void qlge_restore_vlan(struct ql_adapter *qdev)
2379{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002380 int status;
2381 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002382
Jiri Pirko18c49b92011-07-21 03:24:11 +00002383 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2384 if (status)
2385 return;
2386
2387 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2388 __qlge_vlan_rx_add_vid(qdev, vid);
2389
2390 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002391}
2392
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002393/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2394static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2395{
2396 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002397 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002398 return IRQ_HANDLED;
2399}
2400
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002401/* This handles a fatal error, MPI activity, and the default
2402 * rx_ring in an MSI-X multiple vector environment.
2403 * In an MSI/Legacy environment it also processes the rest of
2404 * the rx_rings.
2405 */
2406static irqreturn_t qlge_isr(int irq, void *dev_id)
2407{
2408 struct rx_ring *rx_ring = dev_id;
2409 struct ql_adapter *qdev = rx_ring->qdev;
2410 struct intr_context *intr_context = &qdev->intr_context[0];
2411 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002412 int work_done = 0;
2413
Ron Mercerbb0d2152008-10-20 10:30:26 -07002414 spin_lock(&qdev->hw_lock);
2415 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002416 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2417 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002418 spin_unlock(&qdev->hw_lock);
2419 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002420 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002421 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002422
Ron Mercerbb0d2152008-10-20 10:30:26 -07002423 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002424
2425 /*
2426 * Check for fatal error.
2427 */
2428 if (var & STS_FE) {
2429 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002430 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002431 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002432 netdev_err(qdev->ndev, "Resetting chip. "
2433 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002434 return IRQ_HANDLED;
2435 }
2436
2437 /*
2438 * Check MPI processor activity.
2439 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002440 if ((var & STS_PI) &&
2441 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002442 /*
2443 * We've got an async event or mailbox completion.
2444 * Handle it and clear the source of the interrupt.
2445 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002446 netif_err(qdev, intr, qdev->ndev,
2447 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002448 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002449 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2450 queue_delayed_work_on(smp_processor_id(),
2451 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002452 work_done++;
2453 }
2454
2455 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002456 * Get the bit-mask that shows the active queues for this
2457 * pass. Compare it to the queues that this irq services
2458 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002459 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002460 var = ql_read32(qdev, ISR1);
2461 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002462 netif_info(qdev, intr, qdev->ndev,
2463 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002464 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002465 napi_schedule(&rx_ring->napi);
2466 work_done++;
2467 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002468 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002469 return work_done ? IRQ_HANDLED : IRQ_NONE;
2470}
2471
2472static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2473{
2474
2475 if (skb_is_gso(skb)) {
2476 int err;
2477 if (skb_header_cloned(skb)) {
2478 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479 if (err)
2480 return err;
2481 }
2482
2483 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2484 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2485 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2486 mac_iocb_ptr->total_hdrs_len =
2487 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2488 mac_iocb_ptr->net_trans_offset =
2489 cpu_to_le16(skb_network_offset(skb) |
2490 skb_transport_offset(skb)
2491 << OB_MAC_TRANSPORT_HDR_SHIFT);
2492 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2493 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2494 if (likely(skb->protocol == htons(ETH_P_IP))) {
2495 struct iphdr *iph = ip_hdr(skb);
2496 iph->check = 0;
2497 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2498 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2499 iph->daddr, 0,
2500 IPPROTO_TCP,
2501 0);
2502 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2503 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2504 tcp_hdr(skb)->check =
2505 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2506 &ipv6_hdr(skb)->daddr,
2507 0, IPPROTO_TCP, 0);
2508 }
2509 return 1;
2510 }
2511 return 0;
2512}
2513
2514static void ql_hw_csum_setup(struct sk_buff *skb,
2515 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2516{
2517 int len;
2518 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002519 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002520 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2521 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2522 mac_iocb_ptr->net_trans_offset =
2523 cpu_to_le16(skb_network_offset(skb) |
2524 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2525
2526 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2527 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2528 if (likely(iph->protocol == IPPROTO_TCP)) {
2529 check = &(tcp_hdr(skb)->check);
2530 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2531 mac_iocb_ptr->total_hdrs_len =
2532 cpu_to_le16(skb_transport_offset(skb) +
2533 (tcp_hdr(skb)->doff << 2));
2534 } else {
2535 check = &(udp_hdr(skb)->check);
2536 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2537 mac_iocb_ptr->total_hdrs_len =
2538 cpu_to_le16(skb_transport_offset(skb) +
2539 sizeof(struct udphdr));
2540 }
2541 *check = ~csum_tcpudp_magic(iph->saddr,
2542 iph->daddr, len, iph->protocol, 0);
2543}
2544
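/*
 * Standalone illustration (not driver code): what the ~csum_tcpudp_magic()
 * seeding in ql_hw_csum_setup() above amounts to.  The TCP/UDP checksum
 * field is primed with the complement of the IPv4 pseudo-header sum
 * (addresses, protocol, L4 length) so the hardware only has to add in the
 * segment bytes and fold.  Values here are host-order for simplicity.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t demo_pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
				    uint8_t proto, uint16_t len)
{
	uint64_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;			/* pseudo-header word is 0x00:proto */
	sum += len;
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t sum = demo_pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 6, 40);

	printf("pseudo-header sum %#06x, checksum seed %#06x\n",
	       sum, (uint16_t)~sum);
	return 0;
}
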
Stephen Hemminger613573252009-08-31 19:50:58 +00002545static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002546{
2547 struct tx_ring_desc *tx_ring_desc;
2548 struct ob_mac_iocb_req *mac_iocb_ptr;
2549 struct ql_adapter *qdev = netdev_priv(ndev);
2550 int tso;
2551 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002552 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002553
2554 tx_ring = &qdev->tx_ring[tx_ring_idx];
2555
Ron Mercer74c50b42009-03-09 10:59:27 +00002556 if (skb_padto(skb, ETH_ZLEN))
2557 return NETDEV_TX_OK;
2558
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002559 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002560 netif_info(qdev, tx_queued, qdev->ndev,
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002561 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00002562 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002563 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercer885ee392009-11-03 13:49:31 +00002564 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002565 return NETDEV_TX_BUSY;
2566 }
2567 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2568 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002569 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002570
2571 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2572 mac_iocb_ptr->tid = tx_ring_desc->index;
2573 /* We use the upper 32-bits to store the tx queue for this IO.
2574 * When we get the completion we can use it to establish the context.
2575 */
2576 mac_iocb_ptr->txq_idx = tx_ring_idx;
2577 tx_ring_desc->skb = skb;
2578
2579 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2580
Jesse Grosseab6d182010-10-20 13:56:03 +00002581 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002582 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2583 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002584 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2585 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2586 }
2587 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2588 if (tso < 0) {
2589 dev_kfree_skb_any(skb);
2590 return NETDEV_TX_OK;
2591 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2592 ql_hw_csum_setup(skb,
2593 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2594 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002595 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2596 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002597 netif_err(qdev, tx_queued, qdev->ndev,
2598 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002599 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002600 return NETDEV_TX_BUSY;
2601 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002602 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2603 tx_ring->prod_idx++;
2604 if (tx_ring->prod_idx == tx_ring->wq_len)
2605 tx_ring->prod_idx = 0;
2606 wmb();
2607
2608 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002609 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2610 "tx queued, slot %d, len %d\n",
2611 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002612
2613 atomic_dec(&tx_ring->tx_count);
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002614
2615 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2616 netif_stop_subqueue(ndev, tx_ring->wq_id);
2617 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2618 /*
2619 * The queue got stopped because the tx_ring was full.
2620 * Wake it up, because it's now at least 25% empty.
2621 */
2622 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2623 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002624 return NETDEV_TX_OK;
2625}
2626
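/*
 * Standalone illustration (not driver code): the producer-side ring
 * arithmetic in qlge_send() above.  The producer index wraps at the ring
 * length and the free-descriptor count drops by one per posted frame;
 * posting stops when fewer than two free slots remain, mirroring the
 * atomic_read(&tx_ring->tx_count) < 2 check.
 */
#include <stdio.h>

#define WQ_LEN 8

int main(void)
{
	unsigned int prod = 0;
	int free_slots = WQ_LEN;
	int pkt;

	for (pkt = 0; pkt < 10 && free_slots >= 2; pkt++) {
		/* ... build the IOCB at prod, then ring the doorbell ... */
		if (++prod == WQ_LEN)
			prod = 0;	/* wrap like tx_ring->prod_idx */
		free_slots--;		/* like atomic_dec(&tx_ring->tx_count) */
	}
	printf("posted %d frames, prod=%u, free=%d\n", pkt, prod, free_slots);
	return 0;
}
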
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002627
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002628static void ql_free_shadow_space(struct ql_adapter *qdev)
2629{
2630 if (qdev->rx_ring_shadow_reg_area) {
2631 pci_free_consistent(qdev->pdev,
2632 PAGE_SIZE,
2633 qdev->rx_ring_shadow_reg_area,
2634 qdev->rx_ring_shadow_reg_dma);
2635 qdev->rx_ring_shadow_reg_area = NULL;
2636 }
2637 if (qdev->tx_ring_shadow_reg_area) {
2638 pci_free_consistent(qdev->pdev,
2639 PAGE_SIZE,
2640 qdev->tx_ring_shadow_reg_area,
2641 qdev->tx_ring_shadow_reg_dma);
2642 qdev->tx_ring_shadow_reg_area = NULL;
2643 }
2644}
2645
2646static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2647{
2648 qdev->rx_ring_shadow_reg_area =
2649 pci_alloc_consistent(qdev->pdev,
2650 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2651 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002652 netif_err(qdev, ifup, qdev->ndev,
2653 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002654 return -ENOMEM;
2655 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002656 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002657 qdev->tx_ring_shadow_reg_area =
2658 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2659 &qdev->tx_ring_shadow_reg_dma);
2660 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002661 netif_err(qdev, ifup, qdev->ndev,
2662 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002663 goto err_wqp_sh_area;
2664 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002665 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002666 return 0;
2667
2668err_wqp_sh_area:
2669 pci_free_consistent(qdev->pdev,
2670 PAGE_SIZE,
2671 qdev->rx_ring_shadow_reg_area,
2672 qdev->rx_ring_shadow_reg_dma);
2673 return -ENOMEM;
2674}
2675
2676static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2677{
2678 struct tx_ring_desc *tx_ring_desc;
2679 int i;
2680 struct ob_mac_iocb_req *mac_iocb_ptr;
2681
2682 mac_iocb_ptr = tx_ring->wq_base;
2683 tx_ring_desc = tx_ring->q;
2684 for (i = 0; i < tx_ring->wq_len; i++) {
2685 tx_ring_desc->index = i;
2686 tx_ring_desc->skb = NULL;
2687 tx_ring_desc->queue_entry = mac_iocb_ptr;
2688 mac_iocb_ptr++;
2689 tx_ring_desc++;
2690 }
2691 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002692}
2693
2694static void ql_free_tx_resources(struct ql_adapter *qdev,
2695 struct tx_ring *tx_ring)
2696{
2697 if (tx_ring->wq_base) {
2698 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2699 tx_ring->wq_base, tx_ring->wq_base_dma);
2700 tx_ring->wq_base = NULL;
2701 }
2702 kfree(tx_ring->q);
2703 tx_ring->q = NULL;
2704}
2705
2706static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2707 struct tx_ring *tx_ring)
2708{
2709 tx_ring->wq_base =
2710 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2711 &tx_ring->wq_base_dma);
2712
Joe Perches8e95a202009-12-03 07:58:21 +00002713 if ((tx_ring->wq_base == NULL) ||
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002714 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2715 goto pci_alloc_err;
2716
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002717 tx_ring->q =
2718 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2719 if (tx_ring->q == NULL)
2720 goto err;
2721
2722 return 0;
2723err:
2724 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2725 tx_ring->wq_base, tx_ring->wq_base_dma);
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002726 tx_ring->wq_base = NULL;
2727pci_alloc_err:
2728 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002729 return -ENOMEM;
2730}
2731
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002732static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002733{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002734 struct bq_desc *lbq_desc;
2735
Ron Mercer7c734352009-10-19 03:32:19 +00002736 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002737
Ron Mercer7c734352009-10-19 03:32:19 +00002738 curr_idx = rx_ring->lbq_curr_idx;
2739 clean_idx = rx_ring->lbq_clean_idx;
2740 while (curr_idx != clean_idx) {
2741 lbq_desc = &rx_ring->lbq[curr_idx];
2742
2743 if (lbq_desc->p.pg_chunk.last_flag) {
2744 pci_unmap_page(qdev->pdev,
2745 lbq_desc->p.pg_chunk.map,
2746 ql_lbq_block_size(qdev),
2747 PCI_DMA_FROMDEVICE);
2748 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002749 }
Ron Mercer7c734352009-10-19 03:32:19 +00002750
2751 put_page(lbq_desc->p.pg_chunk.page);
2752 lbq_desc->p.pg_chunk.page = NULL;
2753
2754 if (++curr_idx == rx_ring->lbq_len)
2755 curr_idx = 0;
2756
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002757 }
2758}
2759
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002760static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002761{
2762 int i;
2763 struct bq_desc *sbq_desc;
2764
2765 for (i = 0; i < rx_ring->sbq_len; i++) {
2766 sbq_desc = &rx_ring->sbq[i];
2767 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002768 netif_err(qdev, ifup, qdev->ndev,
2769 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002770 return;
2771 }
2772 if (sbq_desc->p.skb) {
2773 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002774 dma_unmap_addr(sbq_desc, mapaddr),
2775 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002776 PCI_DMA_FROMDEVICE);
2777 dev_kfree_skb(sbq_desc->p.skb);
2778 sbq_desc->p.skb = NULL;
2779 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002780 }
2781}
2782
Ron Mercer4545a3f2009-02-23 10:42:17 +00002783/* Free all large and small rx buffers associated
2784 * with the completion queues for this device.
2785 */
2786static void ql_free_rx_buffers(struct ql_adapter *qdev)
2787{
2788 int i;
2789 struct rx_ring *rx_ring;
2790
2791 for (i = 0; i < qdev->rx_ring_count; i++) {
2792 rx_ring = &qdev->rx_ring[i];
2793 if (rx_ring->lbq)
2794 ql_free_lbq_buffers(qdev, rx_ring);
2795 if (rx_ring->sbq)
2796 ql_free_sbq_buffers(qdev, rx_ring);
2797 }
2798}
2799
2800static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2801{
2802 struct rx_ring *rx_ring;
2803 int i;
2804
2805 for (i = 0; i < qdev->rx_ring_count; i++) {
2806 rx_ring = &qdev->rx_ring[i];
2807 if (rx_ring->type != TX_Q)
2808 ql_update_buffer_queues(qdev, rx_ring);
2809 }
2810}
2811
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->sbq == NULL)
			goto err_mem;

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->lbq == NULL)
			goto err_mem;

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
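	/* The per-ring shadow area now looks like this:
	 *   offset 0: completion producer index (u64, written by the chip)
	 *   offset 8: lbq indirection list, MAX_DB_PAGES_PER_BQ(lbq_len) slots
	 *   then:     sbq indirection list, one slot per DB_PAGE_SIZE page
	 * Each indirection slot will receive the DMA address of one
	 * DB_PAGE_SIZE page of the buffer queue; the loops below fill them.
	 */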

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors. We start with
		 * what we want and settle for what we get.
		 */
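		/* Note on the loop below: pci_enable_msix() here returns 0
		 * on success, a negative errno on failure, or a positive
		 * count of vectors that could have been allocated. Asking
		 * for 8 vectors when only 4 are available therefore returns
		 * 4 on the first pass and 0 on the retry, leaving
		 * qdev->intr_count == 4.
		 */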
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
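/* Concretely, for the 2-vector/8-ring example above the loop below
 * computes tx_rings_per_vector = 4, so outbound rx_ring indices
 * rss_ring_count + 0..3 get irq 0 and rss_ring_count + 4..7 get irq 1.
 */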
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
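/* Example for the MSI-X case: with 2 vectors, 2 RSS rings and 8 TX
 * completion rings (tx_rings_per_vector == 4), vector 1 is left with
 * irq_mask = (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9):
 * its own RSS ring's cq_id plus the cq_ids of TX completion rings 4-7.
 */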
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This is in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));
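	/* With, say, four RSS rings the 1024-entry table becomes the
	 * repeating pattern 0,1,2,3,0,1,2,3,... so the low bits of the
	 * RSS hash select the completion queue. Note the masking only
	 * spreads traffic evenly when rss_ring_count is a power of two.
	 */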

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
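/* The slots programmed below, in order: IP-checksum-error frames,
 * TCP/UDP-checksum-error frames, broadcast frames, RSS matches (only
 * when more than one inbound queue exists), and finally CAM (unicast
 * MAC address) hits.
 */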
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which
	 * the packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set, skip the mailbox command and
	 * clear the bit; otherwise we are in the normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
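	/* Example of the counts above on an assumed 8-CPU system where
	 * MSI-X grants all 8 requested vectors: intr_count ==
	 * rss_ring_count == tx_ring_count == 8 and rx_ring_count == 16,
	 * i.e. rx_ring indices 0..7 are inbound RSS queues and 8..15 are
	 * outbound (TX completion) queues.
	 */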

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);
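	/* e.g. a 9000-byte MTU selects LARGE_BUFFER_MAX_SIZE; assuming
	 * that is 8KB on a system with 4KB pages, get_order() returns 1
	 * and each large-buffer block spans two contiguous pages.
	 */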
4149
4150 for (i = 0; i < qdev->rss_ring_count; i++) {
4151 rx_ring = &qdev->rx_ring[i];
4152 /* Set the new size. */
4153 rx_ring->lbq_buf_size = lbq_buf_len;
4154 }
4155
4156 status = ql_adapter_up(qdev);
4157 if (status)
4158 goto error;
4159
4160 return status;
4161error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004162 netif_alert(qdev, ifup, qdev->ndev,
4163 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004164 set_bit(QL_ADAPTER_UP, &qdev->flags);
4165 dev_close(qdev->ndev);
4166 return status;
4167}
4168
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");

	return status;
}

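/* ndo_get_stats handler.  The hardware keeps counters per ring, so the
 * RX counters are summed over all RSS rings and the TX counters over
 * all TX rings before being reported through net_device_stats.
 */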
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

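/* ndo_set_rx_mode handler.  Promiscuous and all-multicast mode are
 * each toggled through a routing-register slot only when the interface
 * flags actually transition, and any explicit multicast list is loaded
 * into the MAC address CAM before multicast-match routing is enabled.
 */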
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

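/* ndo_set_mac_address handler.  The new address is kept in a local
 * copy (current_mac_addr) and written to this function's CAM slot
 * under the MAC address semaphore.
 */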
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

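/* Worker for the delayed asic_reset_work.  Runs a full down/up cycle
 * under the RTNL lock, then re-applies the current rx mode, since the
 * promiscuous/all-multi state programmed into the routing registers is
 * lost across the reset.  On failure the device is closed.
 */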
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

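/* Read this PCI function's identity.  The STS register gives the
 * function number, ql_get_alt_pcie_func() finds the peer NIC function,
 * and the lower-numbered function becomes port 0.  The port selects
 * the XGMAC semaphore mask, the link/init status bits and the MPI
 * mailbox offsets; the PCI device ID selects the nic_ops table.
 */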
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_fix_features = qlge_fix_features,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};

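/* Periodic heartbeat used for EEH detection.  Reading the STS register
 * every five seconds generates the PCI traffic the EEH core needs to
 * notice a dead bus; if the channel is already offline the timer is
 * not rearmed and the EEH handlers take over.
 */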
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			    NETIF_F_TSO | NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->vlan_features = ndev->hw_features;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5 * HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

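/* Thin wrappers around the normal transmit and rx-ring-clean paths,
 * kept as separate entry points for the ethtool loopback self-test.
 */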
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * and it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);