/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
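
/* Typical lock/unlock pattern (an illustrative sketch, not compiled in):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the shared flash resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_trylock() returns 0 on success, so ql_sem_spinlock() polls it
 * up to 30 times with udelay(100) in between (roughly 3 ms total)
 * before giving up with -ETIMEDOUT.
 */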

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address and
 * netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
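
/* Polling sketch: UDELAY_COUNT and UDELAY_DELAY are assumed to come
 * from qlge.h. A caller waiting on the flash interface, for example,
 * does
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR,
 *				 FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *
 * and gets 0 once 'bit' is set, -EIO if 'err_bit' asserts first, or
 * -ETIMEDOUT after UDELAY_COUNT samples spaced UDELAY_DELAY us apart.
 */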

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status) {
		/* Don't leak the DMA mapping if the semaphore times out. */
		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
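
/* Illustrative call (a sketch; 'cqicb' and 'rx_ring' stand in for the
 * completion-queue init block set up elsewhere in this driver):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * CFG_LCQ is a "load" bit, so the block is DMA-mapped to-device, its
 * address is written to ICB_L/ICB_H, and CFG is triggered with the
 * queue id until ql_wait_cfg() sees the bit clear.
 */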

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set MAC addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
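
/* Illustrative sketch: routing broadcast frames to the default (slow
 * path) queue is
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * and passing enable == 0 instead clears the E-bit for that slot, so
 * the same helper both installs and removes a routing rule.
 */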

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
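
/* Sketch of the irq_cnt pairing described above (applies to the
 * default vector and to non-MSI-X modes; MSI-X multi-vector interrupts
 * are enabled unconditionally):
 *
 *	ql_disable_completion_interrupt(qdev, 0);	// irq_cnt 0 -> 1
 *	...run deferred work...
 *	ql_enable_completion_interrupt(qdev, 0);	// irq_cnt 1 -> 0
 *
 * The hardware enable is written only on the 1 -> 0 transition, and
 * the disable only when the count is found at zero, so nested disables
 * keep the interrupt off until the last enable. The precharge to 1
 * above makes the first enable take that transition.
 */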

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
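
/* The flash image is assumed to carry a complementing 16-bit checksum
 * word, so summing every __le16 in the image (checksum word included)
 * wraps to zero when the contents are intact. For example, for data
 * words 0x1234 and 0x4321 the stored checksum word would be 0xAAAB,
 * since 0x1234 + 0x4321 + 0xAAAB == 0 (mod 0x10000); any nonzero
 * residue is returned as the error.
 */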

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
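
/* Illustrative sketch of the indirect access pattern (callers are
 * assumed to hold qdev->xg_sem_mask, as ql_8012_port_initialize()
 * below does):
 *
 *	u32 data;
 *
 *	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 *	if (!status)
 *		status = ql_write_xgmac_reg(qdev, GLOBAL_CFG,
 *					    data | GLOBAL_CFG_RESET);
 *
 * A 64-bit statistics counter at 'reg' is just the low word at 'reg'
 * plus the high word at 'reg + 4', combined by ql_read_xgmac_reg64().
 */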

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
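
/* Worked example of the chunking above (a sketch assuming 4K pages,
 * lbq_buf_order == 1 and lbq_buf_size == 2048): the 8K master page is
 * handed out as chunks at offsets 0, 2048, 4096 and 6144. The first
 * three hand-outs call get_page() to keep the page alive while chunks
 * are in flight; the chunk at offset 6144 pushes the offset to
 * ql_lbq_block_size() (8192), so it carries last_flag and the master
 * page pointer is dropped, forcing a fresh allocation next time.
 */
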
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx=%d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
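
/* Refill cadence (illustrative summary): both queues are replenished
 * in batches of 16 descriptors and the producer index doorbell is rung
 * once per call rather than once per buffer. With 64 free large
 * buffers, for instance, ql_update_lbq() runs two 16-entry batches
 * (64 -> 48 -> 32) and then issues a single ql_write_db_reg() on
 * lbq_prod_idx_db_reg.
 */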

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the size
			 * of one entry in our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
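
/* Worked example of the OAL layout above (illustrative): for an skb
 * with 10 frags, IOCB segments 0-6 carry skb->data plus frag[0..5],
 * segment 7 points at the OAL, and its length field is
 * sizeof(struct tx_buf_desc) * (10 - 6) OR'd with TX_DESC_C (the
 * continuation flag). frag[6..9] then land in oal->seg[0..3], and the
 * very last entry mapped gets TX_DESC_E as its terminator.
 */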
1435
Ron Mercer4f848c02010-01-02 10:37:43 +00001436/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001437static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1438 struct rx_ring *rx_ring,
1439 struct ib_mac_iocb_rsp *ib_mac_rsp,
1440 u32 length,
1441 u16 vlan_id)
1442{
1443 struct sk_buff *skb;
1444 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001445 struct napi_struct *napi = &rx_ring->napi;
1446
1447 napi->dev = qdev->ndev;
1448
1449 skb = napi_get_frags(napi);
1450 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001451 netif_err(qdev, drv, qdev->ndev,
1452 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001453 rx_ring->rx_dropped++;
1454 put_page(lbq_desc->p.pg_chunk.page);
1455 return;
1456 }
1457 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001458 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1459 lbq_desc->p.pg_chunk.page,
1460 lbq_desc->p.pg_chunk.offset,
1461 length);
Ron Mercer63526712010-01-02 10:37:44 +00001462
1463 skb->len += length;
1464 skb->data_len += length;
1465 skb->truesize += length;
1466 skb_shinfo(skb)->nr_frags++;
1467
1468 rx_ring->rx_packets++;
1469 rx_ring->rx_bytes += length;
1470 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001472 if (vlan_id != 0xffff)
1473 __vlan_hwaccel_put_tag(skb, vlan_id);
1474 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001475}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, need to unwind!\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long for MTU, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + ETH_HLEN,
			   length - ETH_HLEN);
	skb->len += length - ETH_HLEN;
	skb->data_len += length - ETH_HLEN;
	skb->truesize += length - ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + ETH_HLEN);
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		netif_err(qdev, probe, qdev->ndev,
			  "No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}
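
/*
 * The arithmetic above, spelled out: the chip wants receive buffers
 * offset by QLGE_SB_PAD bytes, while the stack performs best when the
 * IP header starts on a 4-byte boundary.  An Ethernet header is 14
 * bytes, so starting the frame NET_IP_ALIGN (2) bytes into the buffer
 * puts the IP header at offset 16.  Sliding data and tail back by
 * QLGE_SB_PAD - NET_IP_ALIGN and copying the frame down converts the
 * hardware's alignment into the stack's alignment in place.
 */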

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			__pskb_pull_tail(skb,
					 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
					 VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		netif_info(qdev, drv, qdev->ndev,
			   "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      ntohs(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
		__vlan_hwaccel_put_tag(skb, vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}
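
/*
 * Illustrative sketch of the vlan_id convention used above: valid
 * 802.1Q VLAN IDs are only 12 bits (0..4095), so 0xffff can never
 * collide with a real tag and is safe as a "no tag" sentinel.  The
 * helper below is hypothetical, not part of the driver.
 */
static inline bool example_rx_vlan_tag_present(u16 vlan_id)
{
	return vlan_id != 0xffff;	/* 0xffff is not a valid 12-bit VID */
}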

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear the adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to tell the reset process
	 * that we are in fatal-error recovery rather than a
	 * normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits occurred during lookup.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
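
/*
 * A worked example of the wake threshold above, assuming a transmit
 * work queue of wq_len = 128 entries: the subqueue is stopped when
 * fewer than two free slots remain, and is only woken once tx_count
 * climbs back above 128 / 4 = 32 free entries.  The 25% hysteresis
 * keeps the stack from bouncing the queue on every reclaimed
 * descriptor.
 */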

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
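
/*
 * The budget handling above follows the standard NAPI contract: a
 * poll routine consumes at most 'budget' completions and returns how
 * many it handled.  Returning less than the budget means the ring is
 * drained, so the handler calls napi_complete() and re-arms the
 * completion interrupt; returning the full budget leaves the ring in
 * polled mode and the kernel schedules the handler again.
 */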

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
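
/*
 * The restore path above depends on active_vlans tracking every VID
 * the stack has added: one bit per possible 12-bit VLAN ID, i.e.
 * VLAN_N_VID (4096) bits or 512 bytes.  After a chip reset wipes the
 * hardware filter, replaying the set bits rebuilds the VLAN table
 * without any help from the networking core.
 */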

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
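
/*
 * Why ql_tso() computes the pseudo-header checksum with a length of
 * zero: the hardware re-segments the super-frame, so the per-segment
 * TCP length is unknown here.  Seeding tcp->check with the complement
 * of the length-free pseudo-header sum lets the chip add each
 * segment's own length and payload sum and fold the result.  With a
 * gso_size of 1460, for example, a 9000-byte payload would leave the
 * wire as seven segments, each checksummed independently by the chip.
 */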

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
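
/*
 * A minimal sketch (hypothetical helper, not driver code) of the
 * one's-complement arithmetic behind the seeding above.  The value
 * stored in *check is the folded pseudo-header sum *without* the
 * final inversion, because the leading ~ undoes the complement that
 * csum_tcpudp_magic() applies; the hardware adds the payload sum to
 * this seed and inverts exactly once.
 */
static inline u16 example_pseudo_hdr_seed(u32 saddr, u32 daddr,
					  u16 len, u8 proto)
{
	u32 sum;

	sum = (saddr >> 16) + (saddr & 0xffff);		/* source words */
	sum += (daddr >> 16) + (daddr & 0xffff);	/* destination words */
	sum += proto + len;				/* protocol + L4 length */
	sum = (sum >> 16) + (sum & 0xffff);		/* fold the carry ... */
	sum += (sum >> 16);				/* ... possibly twice */
	return (u16)sum;				/* seed, not yet inverted */
}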

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}
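
/*
 * Ordering note for the transmit path above: the wmb() guarantees
 * that every descriptor write has reached memory before the doorbell
 * write publishes the new producer index.  Without the barrier the
 * doorbell could overtake the descriptor contents and the chip would
 * DMA stale data.
 */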

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
		kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}
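
/*
 * Note on the alignment test above: WQ_ADDR_ALIGN is used as a bit
 * mask, so "wq_base_dma & WQ_ADDR_ALIGN" is non-zero exactly when the
 * low-order address bits are set, i.e. when the DMA block the chip
 * will walk is misaligned for its work-queue base register.
 */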

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;

	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}
2808
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

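/* Both init helpers above leave descriptor i's ->addr pointing at slot
 * i of the ring's __le64 bus-address array, so a later refill only has
 * to store the buffer's new DMA address through ->addr, with no index
 * arithmetic at that point; roughly (a sketch, not a quote of the
 * refill path):
 *
 *	*sbq_desc->addr = cpu_to_le64(map);
 */
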
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

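/* Layout of the per-ring shadow slice carved up at the top of
 * ql_start_rx_ring() (one RX_RING_SHADOW_SPACE area per completion
 * queue):
 *
 *	offset 0: u64 completion-queue producer index, DMA'd in by the chip
 *	offset 8: lbq indirection list - one __le64 bus address per
 *	          DB_PAGE_SIZE page of the large buffer queue
 *	then:     sbq indirection list, same idea for the small queue
 *
 * Also note the u16 length encoding used in the cqicb: a ring of
 * exactly 65536 entries is written as 0.
 */
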
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
		(tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
		(tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

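/* Note how the two TX indices travel in opposite directions: the driver
 * posts new work by writing the producer index to prod_idx_db_reg, a
 * doorbell in PCI memory space, while the chip reports progress by
 * DMA'ing the consumer index into the cnsmr_idx_sh_reg shadow in host
 * memory, so completion polling never has to read across the PCI bus.
 */
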
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
					      qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

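/* The retry loop above leans on the old pci_enable_msix() contract:
 * 0 means all requested vectors were granted, a positive return is the
 * number of vectors the platform could actually provide (so we shrink
 * intr_count and ask again), and a negative return is a hard error that
 * drops us to MSI and, failing that, to the legacy INTx path.
 */
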
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

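/* Worked example for ql_set_irq_mask() above: with 2 MSI-X vectors,
 * 2 RSS rings and 8 TX completion rings, tx_rings_per_vector is 4, so
 * vector 1 owns RSS ring 1 (cq_id 1) and the TX completion rings at
 * indices 2 + 4..7 (cq_ids 6..9), giving
 *
 *	ctx->irq_mask = (1 << 1) | (1 << 6) | (1 << 7) |
 *			(1 << 8) | (1 << 9);	== 0x3c2
 *
 * cq_id equals the ring's index in qdev->rx_ring[], as assigned in
 * ql_configure_rings() below.
 */
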
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This is in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED, &qdev->flags)
					? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

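/* Flag choice above: MSI-X vectors and MSI are exclusive to this
 * device, so those are requested with flags == 0; only the legacy INTx
 * path passes IRQF_SHARED, since a legacy interrupt line may be shared
 * with other devices.
 */
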
static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}

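/* The indirection fill above relies on rss_ring_count being a power of
 * two: "i & (count - 1)" only round-robins cleanly in that case.  For
 * example, with 4 RSS rings the 1024 entries read 0,1,2,3,0,1,2,3,...
 * The 0x3ff mask matches the 10 index bits of the 1024-entry table.
 */
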
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

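/* Slots enabled above: frames with IP checksum errors, frames with
 * TCP/UDP checksum errors, broadcasts, RSS hash matches (only when more
 * than one inbound queue exists), and finally unicast CAM hits.  All
 * writes happen under the SEM_RT_IDX_MASK hardware semaphore, which
 * presumably keeps the other PCI function on this dual-function chip
 * from updating the shared routing table at the same time.
 */
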
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which the
	 * packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

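/* Reset handshake used above: writing RST_FO with RST_FO_FR in both the
 * mask half (bits 31:16) and the value half arms the function reset,
 * and the chip clears RST_FO_FR when it finishes.  The poll is bounded
 * by end_jiffies: nominally 30us, but never less than one jiffy, since
 * usecs_to_jiffies() rounds up (the max() is extra insurance against a
 * zero timeout).
 */
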
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit a pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings starts
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

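/* Resulting layout, for example on a 4-CPU box that gets all 4 MSI-X
 * vectors: rss_ring_count = 4, tx_ring_count = 4, rx_ring_count = 8.
 * qdev->rx_ring[0..3] are inbound RSS queues (cq_id 0..3, with lbq/sbq
 * buffer queues) and rx_ring[4..7] are outbound completion queues
 * (cq_id 4..7, type TX_Q, no buffer queues), one per tx_ring.
 */
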
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

Ron Mercerc4e84bd2008-09-18 11:56:28 -04004172static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4173{
4174 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004175 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004176
4177 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004178 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004179 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004180 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004181 } else
4182 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004183
4184 queue_delayed_work(qdev->workqueue,
4185 &qdev->mpi_port_cfg_work, 3*HZ);
4186
Breno Leitao746079d2010-02-04 10:11:19 +00004187 ndev->mtu = new_mtu;
4188
Ron Mercer7c734352009-10-19 03:32:19 +00004189 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004190 return 0;
4191 }
4192
Ron Mercer7c734352009-10-19 03:32:19 +00004193 status = ql_change_rx_buffers(qdev);
4194 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004195 netif_err(qdev, ifup, qdev->ndev,
4196 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004197 }
4198
4199 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004200}
4201
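/* ndo_get_stats handler.  The hardware keeps per-ring counters, so
 * sum the RX counters across the RSS rings and the TX counters across
 * the TX rings before reporting them through ndev->stats.
 */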
4202static struct net_device_stats *qlge_get_stats(struct net_device
4203 *ndev)
4204{
Ron Mercer885ee392009-11-03 13:49:31 +00004205 struct ql_adapter *qdev = netdev_priv(ndev);
4206 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4207 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4208 unsigned long pkts, mcast, dropped, errors, bytes;
4209 int i;
4210
4211 /* Get RX stats. */
4212 pkts = mcast = dropped = errors = bytes = 0;
4213 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4214 pkts += rx_ring->rx_packets;
4215 bytes += rx_ring->rx_bytes;
4216 dropped += rx_ring->rx_dropped;
4217 errors += rx_ring->rx_errors;
4218 mcast += rx_ring->rx_multicast;
4219 }
4220 ndev->stats.rx_packets = pkts;
4221 ndev->stats.rx_bytes = bytes;
4222 ndev->stats.rx_dropped = dropped;
4223 ndev->stats.rx_errors = errors;
4224 ndev->stats.multicast = mcast;
4225
4226 /* Get TX stats. */
4227 pkts = errors = bytes = 0;
4228 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4229 pkts += tx_ring->tx_packets;
4230 bytes += tx_ring->tx_bytes;
4231 errors += tx_ring->tx_errors;
4232 }
4233 ndev->stats.tx_packets = pkts;
4234 ndev->stats.tx_bytes = bytes;
4235 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004236 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004237}
4238
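/* ndo_set_rx_mode handler.  The QL_PROMISCUOUS and QL_ALLMULTI flag
 * bits track the current state so the routing registers are only
 * rewritten on a real transition; the multicast list itself is loaded
 * into the MAC address CAM under the MAC address semaphore.
 */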
stephen hemmingerac409212010-10-21 07:50:54 +00004239static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004240{
Joe Perchesb16fed02010-11-15 11:12:28 +00004241 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004242 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004243 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004244
Ron Mercercc288f52009-02-23 10:42:14 +00004245 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4246 if (status)
4247 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004248 /*
4249 * Set or clear promiscuous mode if a
4250 * transition is taking place.
4251 */
4252 if (ndev->flags & IFF_PROMISC) {
4253 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4254 if (ql_set_routing_reg
4255 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004256 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004257 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004258 } else {
4259 set_bit(QL_PROMISCUOUS, &qdev->flags);
4260 }
4261 }
4262 } else {
4263 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4264 if (ql_set_routing_reg
4265 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004266 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004267 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004268 } else {
4269 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4270 }
4271 }
4272 }
4273
4274 /*
4275 * Set or clear all multicast mode if a
4276 * transition is taking place.
4277 */
4278 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004279 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004280 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4281 if (ql_set_routing_reg
4282 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004283 netif_err(qdev, hw, qdev->ndev,
4284 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004285 } else {
4286 set_bit(QL_ALLMULTI, &qdev->flags);
4287 }
4288 }
4289 } else {
4290 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4291 if (ql_set_routing_reg
4292 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004293 netif_err(qdev, hw, qdev->ndev,
4294 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004295 } else {
4296 clear_bit(QL_ALLMULTI, &qdev->flags);
4297 }
4298 }
4299 }
4300
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004301 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004302 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4303 if (status)
4304 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004305 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004306 netdev_for_each_mc_addr(ha, ndev) {
4307 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004308 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004309 netif_err(qdev, hw, qdev->ndev,
 4310				  "Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004311 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004312 goto exit;
4313 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004314 i++;
4315 }
Ron Mercercc288f52009-02-23 10:42:14 +00004316 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004317 if (ql_set_routing_reg
4318 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004319 netif_err(qdev, hw, qdev->ndev,
4320 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004321 } else {
4322 set_bit(QL_ALLMULTI, &qdev->flags);
4323 }
4324 }
4325exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004326 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004327}
4328
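/* ndo_set_mac_address handler.  Keep a local copy of the address in
 * qdev and program it into the CAM slot for this function under the
 * MAC address semaphore.
 */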
4329static int qlge_set_mac_address(struct net_device *ndev, void *p)
4330{
Joe Perchesb16fed02010-11-15 11:12:28 +00004331 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004332 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004333 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004334
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004335 if (!is_valid_ether_addr(addr->sa_data))
4336 return -EADDRNOTAVAIL;
4337 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004338 /* Update local copy of current mac address. */
4339 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004340
Ron Mercercc288f52009-02-23 10:42:14 +00004341 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4342 if (status)
4343 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004344 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4345 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004346 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004347 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004348 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4349 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004350}
4351
4352static void qlge_tx_timeout(struct net_device *ndev)
4353{
Joe Perchesb16fed02010-11-15 11:12:28 +00004354 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004355 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004356}
4357
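/* Worker that recovers from an ASIC error by cycling the adapter down
 * and up under rtnl_lock.  Once the adapter is back up, the RX mode
 * flag bits are cleared and qlge_set_multicast_list() is called so
 * the routing registers are reprogrammed from scratch.
 */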
4358static void ql_asic_reset_work(struct work_struct *work)
4359{
4360 struct ql_adapter *qdev =
4361 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004362 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004363 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004364 status = ql_adapter_down(qdev);
4365 if (status)
4366 goto error;
4367
4368 status = ql_adapter_up(qdev);
4369 if (status)
4370 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004371
4372 /* Restore rx mode. */
4373 clear_bit(QL_ALLMULTI, &qdev->flags);
4374 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4375 qlge_set_multicast_list(qdev->ndev);
4376
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004377 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004378 return;
4379error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004380 netif_alert(qdev, ifup, qdev->ndev,
4381 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004382
Ron Mercerdb988122009-03-09 10:59:17 +00004383 set_bit(QL_ADAPTER_UP, &qdev->flags);
4384 dev_close(qdev->ndev);
4385 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004386}
4387
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004388static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004389 .get_flash = ql_get_8012_flash_params,
4390 .port_initialize = ql_8012_port_initialize,
4391};
4392
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004393static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004394 .get_flash = ql_get_8000_flash_params,
4395 .port_initialize = ql_8000_port_initialize,
4396};
4397
Ron Mercere4552f52009-06-09 05:39:32 +00004398/* Find the pcie function number for the other NIC
4399 * on this chip. Since both NIC functions share a
4400 * common firmware we have the lowest enabled function
4401 * do any common work. Examples would be resetting
4402 * after a fatal firmware error, or doing a firmware
4403 * coredump.
4404 */
4405static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004406{
Ron Mercere4552f52009-06-09 05:39:32 +00004407 int status = 0;
4408 u32 temp;
4409 u32 nic_func1, nic_func2;
4410
4411 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4412 &temp);
4413 if (status)
4414 return status;
4415
4416 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4417 MPI_TEST_NIC_FUNC_MASK);
4418 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4419 MPI_TEST_NIC_FUNC_MASK);
4420
4421 if (qdev->func == nic_func1)
4422 qdev->alt_func = nic_func2;
4423 else if (qdev->func == nic_func2)
4424 qdev->alt_func = nic_func1;
4425 else
4426 status = -EIO;
4427
4428 return status;
4429}
4430
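/* Derive this function's identity from the STS register, work out
 * which physical port it owns relative to the alternate function, and
 * select the matching semaphore masks, status bits, mailbox addresses
 * and nic_ops for the device ID.
 */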
4431static int ql_get_board_info(struct ql_adapter *qdev)
4432{
4433 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004434 qdev->func =
4435 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004436 if (qdev->func > 3)
4437 return -EIO;
4438
4439 status = ql_get_alt_pcie_func(qdev);
4440 if (status)
4441 return status;
4442
4443 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4444 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004445 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4446 qdev->port_link_up = STS_PL1;
4447 qdev->port_init = STS_PI1;
4448 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4449 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4450 } else {
4451 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4452 qdev->port_link_up = STS_PL0;
4453 qdev->port_init = STS_PI0;
4454 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4455 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4456 }
4457 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004458 qdev->device_id = qdev->pdev->device;
4459 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4460 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004461 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4462 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004463 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004464}
4465
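/* Undo everything ql_init_device() set up: the workqueue, the
 * register and doorbell mappings, the optional MPI coredump buffer
 * and the PCI regions.
 */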
4466static void ql_release_all(struct pci_dev *pdev)
4467{
4468 struct net_device *ndev = pci_get_drvdata(pdev);
4469 struct ql_adapter *qdev = netdev_priv(ndev);
4470
4471 if (qdev->workqueue) {
4472 destroy_workqueue(qdev->workqueue);
4473 qdev->workqueue = NULL;
4474 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004475
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004476 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004477 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004478 if (qdev->doorbell_area)
4479 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004480 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004481 pci_release_regions(pdev);
4482 pci_set_drvdata(pdev, NULL);
4483}
4484
4485static int __devinit ql_init_device(struct pci_dev *pdev,
4486 struct net_device *ndev, int cards_found)
4487{
4488 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004489 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004490
Ron Mercere3324712009-07-02 06:06:13 +00004491	memset(qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004492 err = pci_enable_device(pdev);
4493 if (err) {
4494 dev_err(&pdev->dev, "PCI device enable failed.\n");
4495 return err;
4496 }
4497
Ron Mercerebd6e772009-09-29 08:39:25 +00004498 qdev->ndev = ndev;
4499 qdev->pdev = pdev;
4500 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004501
Ron Mercerbc9167f2009-10-10 09:35:04 +00004502 /* Set PCIe read request size */
4503 err = pcie_set_readrq(pdev, 4096);
4504 if (err) {
4505 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004506 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004507 }
4508
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004509 err = pci_request_regions(pdev, DRV_NAME);
4510 if (err) {
4511 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004512		goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004513 }
4514
4515 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004516 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004517 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004518 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004519 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004520 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004521 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004522 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004523 }
4524
4525 if (err) {
4526 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004527 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004528 }
4529
Ron Mercer73475332009-11-06 07:44:58 +00004530 /* Set PCIe reset type for EEH to fundamental. */
4531 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004532 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004533 qdev->reg_base =
4534 ioremap_nocache(pci_resource_start(pdev, 1),
4535 pci_resource_len(pdev, 1));
4536 if (!qdev->reg_base) {
4537 dev_err(&pdev->dev, "Register mapping failed.\n");
4538 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004539 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004540 }
4541
4542 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4543 qdev->doorbell_area =
4544 ioremap_nocache(pci_resource_start(pdev, 3),
4545 pci_resource_len(pdev, 3));
4546 if (!qdev->doorbell_area) {
4547 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4548 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004549 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004550 }
4551
Ron Mercere4552f52009-06-09 05:39:32 +00004552 err = ql_get_board_info(qdev);
4553 if (err) {
4554 dev_err(&pdev->dev, "Register access failed.\n");
4555 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004556 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004557 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004558 qdev->msg_enable = netif_msg_init(debug, default_msg);
4559 spin_lock_init(&qdev->hw_lock);
4560 spin_lock_init(&qdev->stats_lock);
4561
Ron Mercer8aae2602010-01-15 13:31:28 +00004562 if (qlge_mpi_coredump) {
4563 qdev->mpi_coredump =
4564 vmalloc(sizeof(struct ql_mpi_coredump));
4565 if (qdev->mpi_coredump == NULL) {
4566 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4567 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004568 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004569 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004570 if (qlge_force_coredump)
4571 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004572 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004573 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004574 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004575 if (err) {
4576 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004577 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004578 }
4579
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004580 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004581 /* Keep local copy of current mac address. */
4582 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004583
4584 /* Set up the default ring sizes. */
4585 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4586 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4587
4588 /* Set up the coalescing parameters. */
4589 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4590 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4591 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4592 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4593
4594 /*
4595 * Set up the operating parameters.
4596 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004597	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_out2;
	}
4598 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4599 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4600 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004601 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004602 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004603 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004604 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004605 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004606
4607 if (!cards_found) {
4608 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4609 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4610 DRV_NAME, DRV_VERSION);
4611 }
4612 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004613err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004614 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004615err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004616 pci_disable_device(pdev);
4617 return err;
4618}
4619
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004620static const struct net_device_ops qlge_netdev_ops = {
4621 .ndo_open = qlge_open,
4622 .ndo_stop = qlge_close,
4623 .ndo_start_xmit = qlge_send,
4624 .ndo_change_mtu = qlge_change_mtu,
4625 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004626 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004627 .ndo_set_mac_address = qlge_set_mac_address,
4628 .ndo_validate_addr = eth_validate_addr,
4629 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004630 .ndo_fix_features = qlge_fix_features,
4631 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004632 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4633 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004634};
4635
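/* Periodic timer.  Reading STS forces a PCI transaction every five
 * seconds so that a dead bus is noticed by EEH; once the channel is
 * reported offline the timer is simply not re-armed.
 */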
Ron Mercer15c052f2010-02-04 13:32:46 -08004636static void ql_timer(unsigned long data)
4637{
4638 struct ql_adapter *qdev = (struct ql_adapter *)data;
4639 u32 var = 0;
4640
4641 var = ql_read32(qdev, STS);
4642 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004643 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004644 return;
4645 }
4646
Breno Leitao72046d82010-07-01 03:00:17 +00004647 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004648}
4649
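/* PCI probe entry point.  Allocates a multiqueue netdev sized by the
 * default RSS queue count (capped at MAX_CPUS), initializes the board
 * via ql_init_device(), registers the netdev and starts the
 * deferrable timer used for EEH detection.
 */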
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004650static int __devinit qlge_probe(struct pci_dev *pdev,
4651 const struct pci_device_id *pci_entry)
4652{
4653 struct net_device *ndev = NULL;
4654 struct ql_adapter *qdev = NULL;
 4655	static int cards_found;
4656 int err = 0;
4657
Ron Mercer1e213302009-03-09 10:59:21 +00004658 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
Yuval Mintz9eb87382012-07-01 03:18:53 +00004659 min(MAX_CPUS, netif_get_num_default_rss_queues()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004660 if (!ndev)
4661 return -ENOMEM;
4662
4663 err = ql_init_device(pdev, ndev, cards_found);
4664 if (err < 0) {
4665 free_netdev(ndev);
4666 return err;
4667 }
4668
4669 qdev = netdev_priv(ndev);
4670 SET_NETDEV_DEV(ndev, &pdev->dev);
Michał Mirosław88230fd2011-04-18 13:31:21 +00004671 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4672 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4673 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4674 ndev->features = ndev->hw_features |
4675 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004676
4677 if (test_bit(QL_DMA64, &qdev->flags))
4678 ndev->features |= NETIF_F_HIGHDMA;
4679
4680 /*
4681 * Set up net_device structure.
4682 */
4683 ndev->tx_queue_len = qdev->tx_ring_size;
4684 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004685
4686 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004687 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004688 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004689
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004690 err = register_netdev(ndev);
4691 if (err) {
4692 dev_err(&pdev->dev, "net device registration failed.\n");
4693 ql_release_all(pdev);
4694 pci_disable_device(pdev);
4695 return err;
4696 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004697 /* Start up the timer to trigger EEH if
4698 * the bus goes dead
4699 */
4700 init_timer_deferrable(&qdev->timer);
4701 qdev->timer.data = (unsigned long)qdev;
4702 qdev->timer.function = ql_timer;
4703 qdev->timer.expires = jiffies + (5*HZ);
4704 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004705 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004706 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004707 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004708 cards_found++;
4709 return 0;
4710}
4711
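/* Thin wrappers around the normal send and RX-clean paths, exported
 * for loopback testing (presumably the ethtool loopback self-test,
 * given the lb_ prefix and the lb_count bookkeeping in qlge_probe()).
 */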
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004712netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4713{
4714 return qlge_send(skb, ndev);
4715}
4716
4717int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4718{
4719 return ql_clean_inbound_rx_ring(rx_ring, budget);
4720}
4721
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004722static void __devexit qlge_remove(struct pci_dev *pdev)
4723{
4724 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004725 struct ql_adapter *qdev = netdev_priv(ndev);
4726 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004727 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004728 unregister_netdev(ndev);
4729 ql_release_all(pdev);
4730 pci_disable_device(pdev);
4731 free_netdev(ndev);
4732}
4733
Ron Mercer6d190c62009-10-28 08:39:20 +00004734/* Clean up resources without touching hardware. */
4735static void ql_eeh_close(struct net_device *ndev)
4736{
4737 int i;
4738 struct ql_adapter *qdev = netdev_priv(ndev);
4739
4740 if (netif_carrier_ok(ndev)) {
4741 netif_carrier_off(ndev);
4742 netif_stop_queue(ndev);
4743 }
4744
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004745	/* Disable the timer. */
4746 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004747 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004748
4749 for (i = 0; i < qdev->rss_ring_count; i++)
4750 netif_napi_del(&qdev->rx_ring[i].napi);
4751
4752 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4753 ql_tx_ring_clean(qdev);
4754 ql_free_rx_buffers(qdev);
4755 ql_release_adapter_resources(qdev);
4756}
4757
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004758/*
4759 * This callback is called by the PCI subsystem whenever
4760 * a PCI bus error is detected.
4761 */
4762static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4763 enum pci_channel_state state)
4764{
4765 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004766 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004767
Ron Mercer6d190c62009-10-28 08:39:20 +00004768 switch (state) {
4769 case pci_channel_io_normal:
4770 return PCI_ERS_RESULT_CAN_RECOVER;
4771 case pci_channel_io_frozen:
4772 netif_device_detach(ndev);
4773 if (netif_running(ndev))
4774 ql_eeh_close(ndev);
4775 pci_disable_device(pdev);
4776 return PCI_ERS_RESULT_NEED_RESET;
4777 case pci_channel_io_perm_failure:
4778 dev_err(&pdev->dev,
4779 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004780 ql_eeh_close(ndev);
4781 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004782 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004783 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004784
4785 /* Request a slot reset. */
4786 return PCI_ERS_RESULT_NEED_RESET;
4787}
4788
4789/*
 4790 * This callback is called after the PCI bus has been reset.
4791 * Basically, this tries to restart the card from scratch.
4792 * This is a shortened version of the device probe/discovery code,
 4793 * it resembles the first half of the probe routine.
4794 */
4795static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4796{
4797 struct net_device *ndev = pci_get_drvdata(pdev);
4798 struct ql_adapter *qdev = netdev_priv(ndev);
4799
Ron Mercer6d190c62009-10-28 08:39:20 +00004800 pdev->error_state = pci_channel_io_normal;
4801
4802 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004803 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004804 netif_err(qdev, ifup, qdev->ndev,
4805 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004806 return PCI_ERS_RESULT_DISCONNECT;
4807 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004808 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004809
4810 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004811 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004812 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004813 return PCI_ERS_RESULT_DISCONNECT;
4814 }
4815
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004816 return PCI_ERS_RESULT_RECOVERED;
4817}
4818
4819static void qlge_io_resume(struct pci_dev *pdev)
4820{
4821 struct net_device *ndev = pci_get_drvdata(pdev);
4822 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004823 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004824
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004825 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004826 err = qlge_open(ndev);
4827 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004828 netif_err(qdev, ifup, qdev->ndev,
4829 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004830 return;
4831 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004832 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004833 netif_err(qdev, ifup, qdev->ndev,
4834 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004835 }
Breno Leitao72046d82010-07-01 03:00:17 +00004836 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004837 netif_device_attach(ndev);
4838}
4839
4840static struct pci_error_handlers qlge_err_handler = {
4841 .error_detected = qlge_io_error_detected,
4842 .slot_reset = qlge_io_slot_reset,
4843 .resume = qlge_io_resume,
4844};
4845
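/* Suspend handler, also reused by qlge_shutdown().  Bring the adapter
 * down if it was running, arm wake-on-LAN via ql_wol(), then hand the
 * device to the PCI core for the requested power state.
 */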
4846static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4847{
4848 struct net_device *ndev = pci_get_drvdata(pdev);
4849 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004850 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004851
4852 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004853 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004854
4855 if (netif_running(ndev)) {
4856 err = ql_adapter_down(qdev);
 4857		if (err)
4858 return err;
4859 }
4860
Ron Mercerbc083ce2009-10-21 11:07:40 +00004861 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004862 err = pci_save_state(pdev);
4863 if (err)
4864 return err;
4865
4866 pci_disable_device(pdev);
4867
4868 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4869
4870 return 0;
4871}
4872
David S. Miller04da2cf2008-09-19 16:14:24 -07004873#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004874static int qlge_resume(struct pci_dev *pdev)
4875{
4876 struct net_device *ndev = pci_get_drvdata(pdev);
4877 struct ql_adapter *qdev = netdev_priv(ndev);
4878 int err;
4879
4880 pci_set_power_state(pdev, PCI_D0);
4881 pci_restore_state(pdev);
4882 err = pci_enable_device(pdev);
4883 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004884 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004885 return err;
4886 }
4887 pci_set_master(pdev);
4888
4889 pci_enable_wake(pdev, PCI_D3hot, 0);
4890 pci_enable_wake(pdev, PCI_D3cold, 0);
4891
4892 if (netif_running(ndev)) {
4893 err = ql_adapter_up(qdev);
4894 if (err)
4895 return err;
4896 }
4897
Breno Leitao72046d82010-07-01 03:00:17 +00004898 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004899 netif_device_attach(ndev);
4900
4901 return 0;
4902}
David S. Miller04da2cf2008-09-19 16:14:24 -07004903#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004904
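/* Shutdown simply reuses the suspend path, so wake-on-LAN is armed
 * the same way it is for suspend.
 */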
4905static void qlge_shutdown(struct pci_dev *pdev)
4906{
4907 qlge_suspend(pdev, PMSG_SUSPEND);
4908}
4909
4910static struct pci_driver qlge_driver = {
4911 .name = DRV_NAME,
4912 .id_table = qlge_pci_tbl,
4913 .probe = qlge_probe,
4914 .remove = __devexit_p(qlge_remove),
4915#ifdef CONFIG_PM
4916 .suspend = qlge_suspend,
4917 .resume = qlge_resume,
4918#endif
4919 .shutdown = qlge_shutdown,
4920 .err_handler = &qlge_err_handler
4921};
4922
4923static int __init qlge_init_module(void)
4924{
4925 return pci_register_driver(&qlge_driver);
4926}
4927
4928static void __exit qlge_exit(void)
4929{
4930 pci_unregister_driver(&qlge_driver);
4931}
4932
4933module_init(qlge_init_module);
4934module_exit(qlge_exit);