/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
81 "Option to enable MPI firmware dump. "
Ron Mercerd5c1da52010-01-15 13:31:34 +000082 "Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

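	/* Request ownership by writing the set-bits together with the
	 * enable mask, then read back: the set-bits stick only if no
	 * other owner currently holds this semaphore.  Returns 0 on
	 * success, which is what ql_sem_spinlock() polls for.
	 */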
	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used by kernel-thread paths such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Unmap on this early exit so the mapping isn't leaked. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing.  It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware.  We sometimes
 * have to clear it to prevent wrong frame routing,
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

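/* Chip register writes carry a select mask in the upper 16 bits: only
 * the low-order bits whose mask bit is set are modified.  So writing
 * (INTR_EN_EI << 16) | INTR_EN_EI sets the enable bit, and writing
 * (INTR_EN_EI << 16) alone clears it.
 */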
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes.  In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

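/* Disable this completion interrupt unless MSI-X multi-vector mode
 * lets the hardware handle it.  Each call bumps irq_cnt, so disables
 * must be balanced by ql_enable_completion_interrupt() calls.
 */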
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

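/* Check the 4-byte signature at the start of the flash image and
 * verify that its 16-bit words sum to zero; a nonzero running sum
 * means the parameter block is corrupt.
 */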
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

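/* Read one 32-bit word from flash through the FLASH_ADDR/FLASH_DATA
 * register pair, waiting on the ready bit around each step.
 */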
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

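/* On 8000-series devices port bring-up is owned by the MPI firmware;
 * the driver only queries firmware state here and queues a worker to
 * exchange TX/RX frame-size settings.
 */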
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

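/* Pop the next large-buffer descriptor and sync its page chunk for
 * CPU access.  The DMA mapping covers the whole master page, so it
 * is only torn down when the last chunk of that page is consumed.
 */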
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

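/* Tell the chip how far into this completion queue we have consumed. */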
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

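/* Refill one large-buffer descriptor.  Large buffers are carved as
 * lbq_buf_size chunks out of one "master" page of order
 * qdev->lbq_buf_order; all chunks share that page's DMA mapping, and
 * every chunk except the last takes an extra page reference while
 * the last chunk inherits the allocation's reference.
 */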
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx=%d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;
	default:
		break;
	}
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	napi_gro_frags(napi);
}

1507/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001508static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1509 struct rx_ring *rx_ring,
1510 struct ib_mac_iocb_rsp *ib_mac_rsp,
1511 u32 length,
1512 u16 vlan_id)
1513{
1514 struct net_device *ndev = qdev->ndev;
1515 struct sk_buff *skb = NULL;
1516 void *addr;
1517 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1518 struct napi_struct *napi = &rx_ring->napi;
1519
1520 skb = netdev_alloc_skb(ndev, length);
1521 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001522 netif_err(qdev, drv, qdev->ndev,
1523 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001524 rx_ring->rx_dropped++;
1525 put_page(lbq_desc->p.pg_chunk.page);
1526 return;
1527 }
1528
1529 addr = lbq_desc->p.pg_chunk.va;
1530 prefetch(addr);
1531
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (length > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
			   length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + ETH_HLEN);
			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}
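
/* The path above is the usual "copy headers, chain payload" trick:
 * only the Ethernet header is memcpy'd into the skb's linear area so
 * that eth_type_trans() can parse it, while the rest of the frame
 * stays in the page chunk and is attached as a fragment.  This keeps
 * the copy cost constant regardless of frame size.
 */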

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
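
/* Small-buffer receives are copied into a fresh skb rather than
 * consumed in place: since sbq_desc->p.skb is not cleared here, the
 * original small-buffer skb stays mapped and can be handed straight
 * back to the hardware on the next ring refill.  This is the usual
 * copy-break rationale - for short frames a memcpy is presumably
 * cheaper than unmapping and reallocating a ring buffer.
 */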

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc, mapaddr),
						    dma_unmap_len(sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc, mapaddr),
						       dma_unmap_len(sbq_desc, maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
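
/* ql_build_rx_skb() decodes the completion flags: header split or not
 * (IB_MAC_IOCB_RSP_HS) crossed with where the data landed - a small
 * buffer (DS), one large buffer (DL), or a chain of large buffers.
 * Every branch ends the same way: an skb with the L2/L3 headers in
 * skb->data (pulled there if necessary) and any remaining payload
 * left as page fragments.
 */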

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
		return (unsigned long)length;
	}

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}
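
/* Dispatch summary for the receive paths above, keyed off the IOCB
 * flags (HV = header/data split, DS = data in a small buffer, DL =
 * data in one large buffer, T = hardware-verified TCP):
 *
 *	HV			-> ql_process_mac_split_rx_intr()
 *	DS			-> ql_process_mac_rx_skb()
 *	DL && T && csum ok	-> ql_process_mac_rx_gro_page()
 *	DL			-> ql_process_mac_rx_page()
 *	otherwise		-> ql_process_mac_split_rx_intr()
 */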

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to indicate to the reset process
	 * that we are in a fatal error recovery rather than a normal
	 * close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
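
/* The wake threshold above mirrors the stop condition in qlge_send():
 * the subqueue is stopped when descriptors run out and only woken once
 * at least a quarter of wq_len entries are free again.  The hysteresis
 * keeps the queue from flapping between stopped and awake on every
 * single completion.
 */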

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
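
/* Each MSI-X vector's NAPI context drains every TX completion ring
 * whose bit is set in its intr_context->irq_mask before touching its
 * own RSS ring, so TX reclaim never waits behind a full RX budget.
 * Only RX work counts against the NAPI budget; TX completion
 * processing is effectively unbounded here, the usual trade-off since
 * reclaiming TX descriptors is cheap compared to building RX skbs.
 */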

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}
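
/* The double-underscore helpers above and below assume the caller
 * already holds the SEM_MAC_ADDR_MASK hardware semaphore; the public
 * qlge_vlan_rx_add_vid()/qlge_vlan_rx_kill_vid() wrappers take and
 * release it around each call.  qlge_restore_vlan() relies on this
 * split to grab the semaphore once while replaying many VIDs.
 */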

static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002365
Jiri Pirko8e586132011-12-08 19:52:37 -05002366static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002367{
2368 struct ql_adapter *qdev = netdev_priv(ndev);
2369 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002370 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002371
2372 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002374 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002375
Jiri Pirko8e586132011-12-08 19:52:37 -05002376 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002377 clear_bit(vid, qdev->active_vlans);
2378
2379 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002380
2381 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002382}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
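
/* For shared (legacy INTx) interrupts the handler above must first
 * decide whether the assertion was ours at all: irq_cnt acts as a
 * disable nesting count, so a non-zero value means this device's
 * completion interrupt is masked and a firing can only belong to
 * another device on the line, hence the IRQ_NONE.  Fatal-error and
 * MPI work are punted to the workqueue so the hard-irq path never
 * blocks on mailbox traffic.
 */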

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
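
/* Both ql_tso() and ql_hw_csum_setup() seed the TCP/UDP checksum
 * field with the one's-complement pseudo-header sum before handing
 * the frame to the chip:
 *
 *	*check = ~csum_tcpudp_magic(saddr, daddr, len, proto, 0);
 *
 * For TSO the length argument is 0 because the hardware fills in
 * per-segment lengths as it slices the frame.  The NIC then folds the
 * payload into that seed while it DMAs the data out, which is the
 * standard contract for checksum offload engines.
 */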

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}
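
/* Transmit ordering in qlge_send() matters: the IOCB is fully built
 * and the fragments DMA-mapped before prod_idx advances, and the
 * wmb() makes the descriptor writes visible before the doorbell write
 * (ql_write_db_reg) tells the chip to fetch it.  The stop/wake check
 * after the atomic_dec() re-opens the queue if completions raced in
 * between the stop decision and the doorbell.
 */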

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
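
/* The "shadow" pages allocated above are coherent DMA memory that the
 * chip updates with its producer/consumer indices, which is why the
 * hot paths can poll ql_read_sh_reg() - a plain memory read - instead
 * of paying for an MMIO register read on every completion pass.
 */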

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}
2788
Ron Mercer4545a3f2009-02-23 10:42:17 +00002789/* Free all large and small rx buffers associated
2790 * with the completion queues for this device.
2791 */
2792static void ql_free_rx_buffers(struct ql_adapter *qdev)
2793{
2794 int i;
2795 struct rx_ring *rx_ring;
2796
2797 for (i = 0; i < qdev->rx_ring_count; i++) {
2798 rx_ring = &qdev->rx_ring[i];
2799 if (rx_ring->lbq)
2800 ql_free_lbq_buffers(qdev, rx_ring);
2801 if (rx_ring->sbq)
2802 ql_free_sbq_buffers(qdev, rx_ring);
2803 }
2804}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}
2834
2835static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002836 struct rx_ring *rx_ring)
2837{
2838 int i;
2839 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002840 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002841
Ron Mercer4545a3f2009-02-23 10:42:17 +00002842 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002843 for (i = 0; i < rx_ring->sbq_len; i++) {
2844 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002845 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002846 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002847 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002848 bq++;
2849 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002850}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
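
/*
 * Illustrative sketch (not part of the driver): the allocation path above
 * uses the common kernel "goto unwind" idiom -- any partial failure jumps
 * to a single error label that releases whatever was allocated so far.
 * A minimal sketch of the same shape; the example_ name is hypothetical.
 */
static int example_alloc_two(void **a, void **b, size_t len)
{
	*a = kmalloc(len, GFP_KERNEL);
	if (*a == NULL)
		goto err_mem;
	*b = kmalloc(len, GFP_KERNEL);
	if (*b == NULL)
		goto err_mem;
	return 0;

err_mem:
	kfree(*a);		/* kfree(NULL) is a safe no-op */
	*a = NULL;
	return -ENOMEM;
}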

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
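
/*
 * Illustrative sketch (not part of the driver): the buffer queue base
 * addresses above are handed to the chip as an indirection table -- one
 * little-endian 64-bit DMA address per DB_PAGE_SIZE page of the queue.
 * A minimal sketch, assuming the destination table is already mapped;
 * the example_ name is hypothetical.
 */
static void example_fill_indirect_table(__le64 *table, u64 queue_dma,
					int page_count)
{
	int i;

	/* One entry per doorbell page, each DB_PAGE_SIZE bytes apart. */
	for (i = 0; i < page_count; i++) {
		table[i] = cpu_to_le64(queue_dma);
		queue_dma += DB_PAGE_SIZE;
	}
}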

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}
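
/*
 * Illustrative sketch (not part of the driver): the historical
 * three-argument pci_enable_msix() used above returns a positive value
 * when fewer vectors are available than requested, so the caller retries
 * with the reduced count until success (0) or a hard error (< 0).  A
 * minimal sketch of that negotiation; the example_ name is hypothetical.
 */
static int example_negotiate_vectors(struct pci_dev *pdev,
				     struct msix_entry *entries, int want)
{
	int err;

	do {
		err = pci_enable_msix(pdev, entries, want);
		if (err > 0)
			want = err;	/* settle for what the bus offers */
	} while (err > 0);

	return err ? err : want;	/* < 0 on failure, count on success */
}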

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
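
/*
 * Illustrative sketch (not part of the driver): the assignment above is
 * equivalent to integer division of the TX completion ring index by the
 * rings-per-vector ratio.  With 2 vectors and 8 TX completion rings,
 * rings 0-3 land on vector 0 and rings 4-7 on vector 1, matching the
 * worked example in the comment.  The example_ name is hypothetical.
 */
static inline int example_tx_ring_vector(int tx_idx, int tx_ring_count,
					 int vector_count)
{
	return tx_idx / (tx_ring_count / vector_count);
}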

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
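
/*
 * Illustrative sketch (not part of the driver): assuming, as
 * ql_configure_rings() arranges, that a ring's cq_id equals its index,
 * the 2-vector, 8-TX-ring example gives vector 1 a mask covering cq 1
 * (its RSS ring) plus cqs 6-9 (its four TX completion rings).  The
 * example_ name is hypothetical.
 */
static u32 example_vector_mask(int vect, int rss_ring_count,
			       int tx_rings_per_vector)
{
	u32 mask = 1 << vect;	/* the vector's own RSS ring cq_id */
	int j;

	/* OR in one bit per TX completion ring owned by this vector. */
	for (j = 0; j < tx_rings_per_vector; j++)
		mask |= 1 << (rss_ring_count + vect * tx_rings_per_vector + j);

	return mask;
}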

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This is in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}
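
/*
 * Illustrative sketch (not part of the driver): when the RSS ring count
 * is a power of two -- which the masking above assumes -- "i & (count - 1)"
 * is a cheap substitute for "i % count", spreading the 1024
 * indirection-table slots evenly across the rings.  The example_ name is
 * hypothetical.
 */
static inline u8 example_indirection_entry(int slot, int rss_ring_count)
{
	/* Equivalent to slot % rss_ring_count for power-of-2 counts. */
	return (u8)(slot & (rss_ring_count - 1));
}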

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that
	 * to determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which the
	 * packet arrived, in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set then skip the mailbox commands and
	 * clear the bit; otherwise we are in the normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
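
/*
 * Illustrative sketch (not part of the driver): the reset wait above is
 * the standard jiffies-bounded polling idiom -- compute a deadline once,
 * then spin with cpu_relax() until the hardware bit clears or the
 * deadline passes.  A minimal sketch of the same shape; the example_
 * name is hypothetical.
 */
static int example_poll_until_clear(struct ql_adapter *qdev, u32 reg, u32 bit,
				    unsigned long timeout_usecs)
{
	unsigned long end = jiffies +
		max((unsigned long)1, usecs_to_jiffies(timeout_usecs));

	do {
		if ((ql_read32(qdev, reg) & bit) == 0)
			return 0;	/* bit cleared in time */
		cpu_relax();
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;	/* deadline passed with the bit still set */
}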

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}
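
/*
 * Illustrative sketch (not part of the driver): the rx_ring[] array laid
 * out above holds the RSS completion queues first, then the TX completion
 * queues, so a completion queue id maps back to its role like this.  The
 * example_ name is hypothetical.
 */
static inline int example_cq_is_tx_completion(int cq_id, int rss_ring_count)
{
	/* cq_id 0..rss_ring_count-1 are RSS rings; the rest service TX. */
	return cq_id >= rss_ring_count;
}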

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}
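
/* Usage sketch (editor's note; "eth0" is a placeholder interface name):
 * the handler above only accepts transitions between the two supported
 * MTUs, so from userspace the switch looks like
 *
 *	# ip link set dev eth0 mtu 9000		(1500 -> 9000, accepted)
 *	# ip link set dev eth0 mtu 4000		(rejected with EINVAL)
 */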

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
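
/* Editor's note on the locking above: SEM_RT_IDX_MASK (the routing-index
 * hardware semaphore) is held across the whole rx-mode update, and
 * SEM_MAC_ADDR_MASK is taken nested inside it only while the multicast CAM
 * entries are written. Both are hardware semaphores, which is why every
 * early exit funnels through the unlock at the "exit" label.
 */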

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
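
/* Worked example (editor's sketch; the concrete shift/mask values live in
 * qlge.h and are only assumed here): if MPI_TEST_FUNC_PORT_CFG decodes to
 * NIC1 on PCI function 0 and NIC2 on PCI function 2, an adapter probed as
 * function 2 records alt_func = 0, and function 0, being the lowest
 * enabled function, takes on the shared-firmware duties described above.
 */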

static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
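
/* Editor's note on the mapping above: whichever of the two NIC functions
 * has the lower number becomes port 0 (XGMAC0 semaphore, FUNC0 mailboxes)
 * and the other becomes port 1, so the port index is relative to the pair
 * rather than tied to absolute PCI function numbers.
 */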

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_out2;
	}
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
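
/* Editor's reading of the timer above: it is a watchdog for EEH (PCI
 * error recovery). The periodic ql_read32(STS) is what pokes the bus; on
 * a dead bus that read is what trips the platform's error detection,
 * after which pci_channel_offline() reports the channel down, the stale
 * STS value is logged, and the timer simply stops re-arming itself.
 */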

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	ndev->vlan_features = ndev->hw_features;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
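
/* Editor's note: these two thin, non-static wrappers appear to exist so
 * the ethtool self-test code (presumably in qlge_ethtool.c) can drive the
 * normal transmit and rx-completion paths for the loopback test without
 * the core routines themselves being given wider visibility.
 */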

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
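
/* Editor's summary of the EEH recovery flow wired up above: on a frozen
 * channel, error_detected() detaches the netdev and tears down software
 * state via ql_eeh_close(); slot_reset() then re-enables the device and
 * resets the ASIC; finally resume() re-runs qlge_open() (when the
 * interface was up) and re-arms the watchdog timer.
 */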

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);