Ron Mercerc4e84bd2008-09-18 11:56:28 -04001/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
Jiri Pirko18c49b92011-07-21 03:24:11 +000010#include <linux/bitops.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040011#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/list.h>
14#include <linux/pci.h>
15#include <linux/dma-mapping.h>
16#include <linux/pagemap.h>
17#include <linux/sched.h>
18#include <linux/slab.h>
19#include <linux/dmapool.h>
20#include <linux/mempool.h>
21#include <linux/spinlock.h>
22#include <linux/kthread.h>
23#include <linux/interrupt.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/in.h>
27#include <linux/ip.h>
28#include <linux/ipv6.h>
29#include <net/ipv6.h>
30#include <linux/tcp.h>
31#include <linux/udp.h>
32#include <linux/if_arp.h>
33#include <linux/if_ether.h>
34#include <linux/netdevice.h>
35#include <linux/etherdevice.h>
36#include <linux/ethtool.h>
Jiri Pirko18c49b92011-07-21 03:24:11 +000037#include <linux/if_vlan.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040038#include <linux/skbuff.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040040#include <linux/delay.h>
41#include <linux/mm.h>
42#include <linux/vmalloc.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040043#include <linux/prefetch.h>
Kamalesh Babulalb7c6bfb2008-10-13 18:41:01 -070044#include <net/ip6_checksum.h>
Ron Mercerc4e84bd2008-09-18 11:56:28 -040045
46#include "qlge.h"
47
48char qlge_driver_name[] = DRV_NAME;
49const char qlge_driver_version[] = DRV_VERSION;
50
51MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
52MODULE_DESCRIPTION(DRV_STRING " ");
53MODULE_LICENSE("GPL");
54MODULE_VERSION(DRV_VERSION);
55
56static const u32 default_msg =
57 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
58/* NETIF_MSG_TIMER | */
59 NETIF_MSG_IFDOWN |
60 NETIF_MSG_IFUP |
61 NETIF_MSG_RX_ERR |
62 NETIF_MSG_TX_ERR |
Ron Mercer49740972009-02-26 10:08:36 +000063/* NETIF_MSG_TX_QUEUED | */
64/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
Ron Mercerc4e84bd2008-09-18 11:56:28 -040065/* NETIF_MSG_PKTDATA | */
66 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
67
Sonny Rao84cf7022010-11-18 11:50:02 +000068static int debug = -1; /* defaults above */
69module_param(debug, int, 0664);
Ron Mercerc4e84bd2008-09-18 11:56:28 -040070MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
71
72#define MSIX_IRQ 0
73#define MSI_IRQ 1
74#define LEG_IRQ 2
Ron Mercera5a62a12009-11-11 12:54:05 +000075static int qlge_irq_type = MSIX_IRQ;
Sonny Rao84cf7022010-11-18 11:50:02 +000076module_param(qlge_irq_type, int, 0664);
Ron Mercera5a62a12009-11-11 12:54:05 +000077MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
Ron Mercerc4e84bd2008-09-18 11:56:28 -040078
Ron Mercer8aae2602010-01-15 13:31:28 +000079static int qlge_mpi_coredump;
80module_param(qlge_mpi_coredump, int, 0);
81MODULE_PARM_DESC(qlge_mpi_coredump,
82 "Option to enable MPI firmware dump. "
Ron Mercerd5c1da52010-01-15 13:31:34 +000083 "Default is OFF - Do not allocate memory.");
84
85static int qlge_force_coredump;
86module_param(qlge_force_coredump, int, 0);
87MODULE_PARM_DESC(qlge_force_coredump,
88 "Option to allow force of firmware core dump. "
89 "Default is OFF - Do not allow.");
Ron Mercer8aae2602010-01-15 13:31:28 +000090
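/* Illustrative load example (not part of the original source); the
 * parameter names match the module_param() declarations above:
 *
 *	modprobe qlge debug=16 qlge_irq_type=1 qlge_mpi_coredump=1
 */
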
Alexey Dobriyana3aa1882010-01-07 11:58:11 +000091static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +000092 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
Ron Mercercdca8d02009-03-02 08:07:31 +000093 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
Ron Mercerc4e84bd2008-09-18 11:56:28 -040094 /* required last entry */
95 {0,}
96};
97
98MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
99
stephen hemmingerac409212010-10-21 07:50:54 +0000100static int ql_wol(struct ql_adapter *qdev);
101static void qlge_set_multicast_list(struct net_device *ndev);
102
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400103/* This hardware semaphore provides exclusive access to
 104 * resources shared between the NIC driver, MPI firmware,
 105 * FCoE firmware and the FC driver.
106 */
107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108{
109 u32 sem_bits = 0;
110
111 switch (sem_mask) {
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 break;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 break;
118 case SEM_ICB_MASK:
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 break;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 break;
124 case SEM_FLASH_MASK:
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 break;
127 case SEM_PROBE_MASK:
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 break;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 break;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 break;
136 default:
Joe Perchesae9540f2010-02-09 11:49:52 +0000137 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400138 return -EINVAL;
139 }
140
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
143}
144
145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146{
Ron Mercer0857e9d2009-01-09 11:31:52 +0000147 unsigned int wait_count = 30;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400148 do {
149 if (!ql_sem_trylock(qdev, sem_mask))
150 return 0;
Ron Mercer0857e9d2009-01-09 11:31:52 +0000151 udelay(100);
152 } while (--wait_count);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400153 return -ETIMEDOUT;
154}
155
156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157{
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
160}
161
162/* This function waits for a specific bit to come ready
163 * in a given register. It is used mostly by the initialize
164 * process, but is also used in kernel thread API such as
165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166 */
167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168{
169 u32 temp;
170 int count = UDELAY_COUNT;
171
172 while (count) {
173 temp = ql_read32(qdev, reg);
174
175 /* check for errors */
176 if (temp & err_bit) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000177 netif_alert(qdev, probe, qdev->ndev,
178 "register 0x%.08x access error, value = 0x%.08x!.\n",
179 reg, temp);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400180 return -EIO;
181 } else if (temp & bit)
182 return 0;
183 udelay(UDELAY_DELAY);
184 count--;
185 }
Joe Perchesae9540f2010-02-09 11:49:52 +0000186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400188 return -ETIMEDOUT;
189}
190
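/* Usage sketch (illustrative; ql_get_mac_addr_reg() below follows the same
 * pattern): wait for the ready bit before driving the indexed register.
 *
 *	status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *	if (status)
 *		return status;
 *	ql_write32(qdev, MAC_ADDR_IDX, ...);
 */
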
191/* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
193 */
194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195{
196 int count = UDELAY_COUNT;
197 u32 temp;
198
199 while (count) {
200 temp = ql_read32(qdev, CFG);
201 if (temp & CFG_LE)
202 return -EIO;
203 if (!(temp & bit))
204 return 0;
205 udelay(UDELAY_DELAY);
206 count--;
207 }
208 return -ETIMEDOUT;
209}
210
211
212/* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
214 */
215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
216 u16 q_id)
217{
218 u64 map;
219 int status = 0;
220 int direction;
221 u32 mask;
222 u32 value;
223
224 direction =
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
226 PCI_DMA_FROMDEVICE;
227
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400231 return -ENOMEM;
232 }
233
Ron Mercer4322c5b2009-07-02 06:06:06 +0000234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 if (status)
236 return status;
237
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400238 status = ql_wait_cfg(qdev, bit);
239 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400242 goto exit;
243 }
244
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400247
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
251
252 /*
253 * Wait for the bit to clear after signaling hw.
254 */
255 status = ql_wait_cfg(qdev, bit);
256exit:
Ron Mercer4322c5b2009-07-02 06:06:06 +0000257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400258 pci_unmap_single(qdev->pdev, map, size, direction);
259 return status;
260}
261
262/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 u32 *value)
265{
266 u32 offset = 0;
267 int status;
268
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400269 switch (type) {
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
272 {
273 status =
274 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400276 if (status)
277 goto exit;
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 status =
282 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400284 if (status)
285 goto exit;
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 status =
288 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400290 if (status)
291 goto exit;
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 status =
296 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400298 if (status)
299 goto exit;
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 status =
303 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400305 if (status)
306 goto exit;
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 status =
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
Ron Mercer939678f2009-01-04 17:08:29 -0800312 MAC_ADDR_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400313 if (status)
314 goto exit;
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 }
317 break;
318 }
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
321 default:
Joe Perchesae9540f2010-02-09 11:49:52 +0000322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400324 status = -EPERM;
325 }
326exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400327 return status;
328}
329
330/* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
332 */
333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 u16 index)
335{
336 u32 offset = 0;
337 int status = 0;
338
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400339 switch (type) {
340 case MAC_ADDR_TYPE_MULTI_MAC:
Ron Mercer76b26692009-10-08 09:54:40 +0000341 {
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
345
346 status =
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
349 if (status)
350 goto exit;
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
353 type | MAC_ADDR_E);
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 status =
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
358 if (status)
359 goto exit;
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
362 type | MAC_ADDR_E);
363
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 status =
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 goto exit;
370 break;
371 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400372 case MAC_ADDR_TYPE_CAM_MAC:
373 {
374 u32 cam_output;
375 u32 upper = (addr[0] << 8) | addr[1];
376 u32 lower =
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
378 (addr[5]);
379
Joe Perchesae9540f2010-02-09 11:49:52 +0000380 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
381 "Adding %s address %pM at index %d in the CAM.\n",
382 type == MAC_ADDR_TYPE_MULTI_MAC ?
383 "MULTICAST" : "UNICAST",
384 addr, index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400385
386 status =
387 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800388 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400389 if (status)
390 goto exit;
391 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
392 (index << MAC_ADDR_IDX_SHIFT) | /* index */
393 type); /* type */
394 ql_write32(qdev, MAC_ADDR_DATA, lower);
395 status =
396 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800397 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400398 if (status)
399 goto exit;
400 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
401 (index << MAC_ADDR_IDX_SHIFT) | /* index */
402 type); /* type */
403 ql_write32(qdev, MAC_ADDR_DATA, upper);
404 status =
405 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800406 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400407 if (status)
408 goto exit;
409 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
410 (index << MAC_ADDR_IDX_SHIFT) | /* index */
411 type); /* type */
412 /* This field should also include the queue id
413 and possibly the function id. Right now we hardcode
414 the route field to NIC core.
415 */
Ron Mercer76b26692009-10-08 09:54:40 +0000416 cam_output = (CAM_OUT_ROUTE_NIC |
417 (qdev->
418 func << CAM_OUT_FUNC_SHIFT) |
419 (0 << CAM_OUT_CQ_ID_SHIFT));
Jiri Pirko18c49b92011-07-21 03:24:11 +0000420 if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
Ron Mercer76b26692009-10-08 09:54:40 +0000421 cam_output |= CAM_OUT_RV;
422 /* route to NIC core */
423 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400424 break;
425 }
426 case MAC_ADDR_TYPE_VLAN:
427 {
428 u32 enable_bit = *((u32 *) &addr[0]);
429 /* For VLAN, the addr actually holds a bit that
430 * either enables or disables the vlan id we are
431 * addressing. It's either MAC_ADDR_E on or off.
432 * That's bit-27 we're talking about.
433 */
Joe Perchesae9540f2010-02-09 11:49:52 +0000434 netif_info(qdev, ifup, qdev->ndev,
435 "%s VLAN ID %d %s the CAM.\n",
436 enable_bit ? "Adding" : "Removing",
437 index,
438 enable_bit ? "to" : "from");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400439
440 status =
441 ql_wait_reg_rdy(qdev,
Ron Mercer939678f2009-01-04 17:08:29 -0800442 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400443 if (status)
444 goto exit;
445 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
446 (index << MAC_ADDR_IDX_SHIFT) | /* index */
447 type | /* type */
448 enable_bit); /* enable/disable */
449 break;
450 }
451 case MAC_ADDR_TYPE_MULTI_FLTR:
452 default:
Joe Perchesae9540f2010-02-09 11:49:52 +0000453 netif_crit(qdev, ifup, qdev->ndev,
454 "Address type %d not yet supported.\n", type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400455 status = -EPERM;
456 }
457exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400458 return status;
459}
460
Ron Mercer7fab3bf2009-07-02 06:06:11 +0000461/* Set or clear MAC address in hardware. We sometimes
462 * have to clear it to prevent wrong frame routing
463 * especially in a bonding environment.
464 */
465static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
466{
467 int status;
468 char zero_mac_addr[ETH_ALEN];
469 char *addr;
470
471 if (set) {
Ron Mercer801e9092010-02-17 06:41:22 +0000472 addr = &qdev->current_mac_addr[0];
Joe Perchesae9540f2010-02-09 11:49:52 +0000473 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
474 "Set Mac addr %pM\n", addr);
Ron Mercer7fab3bf2009-07-02 06:06:11 +0000475 } else {
476 memset(zero_mac_addr, 0, ETH_ALEN);
477 addr = &zero_mac_addr[0];
Joe Perchesae9540f2010-02-09 11:49:52 +0000478 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
479 "Clearing MAC address\n");
Ron Mercer7fab3bf2009-07-02 06:06:11 +0000480 }
481 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
482 if (status)
483 return status;
484 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
485 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
486 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
487 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +0000488 netif_err(qdev, ifup, qdev->ndev,
489 "Failed to init mac address.\n");
Ron Mercer7fab3bf2009-07-02 06:06:11 +0000490 return status;
491}
492
Ron Mercer6a473302009-07-02 06:06:12 +0000493void ql_link_on(struct ql_adapter *qdev)
494{
Joe Perchesae9540f2010-02-09 11:49:52 +0000495 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
Ron Mercer6a473302009-07-02 06:06:12 +0000496 netif_carrier_on(qdev->ndev);
497 ql_set_mac_addr(qdev, 1);
498}
499
500void ql_link_off(struct ql_adapter *qdev)
501{
Joe Perchesae9540f2010-02-09 11:49:52 +0000502 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
Ron Mercer6a473302009-07-02 06:06:12 +0000503 netif_carrier_off(qdev->ndev);
504 ql_set_mac_addr(qdev, 0);
505}
506
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400507/* Get a specific frame routing value from the CAM.
508 * Used for debug and reg dump.
509 */
510int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
511{
512 int status = 0;
513
Ron Mercer939678f2009-01-04 17:08:29 -0800514 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400515 if (status)
516 goto exit;
517
518 ql_write32(qdev, RT_IDX,
519 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
Ron Mercer939678f2009-01-04 17:08:29 -0800520 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400521 if (status)
522 goto exit;
523 *value = ql_read32(qdev, RT_DATA);
524exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400525 return status;
526}
527
528/* The NIC function for this chip has 16 routing indexes. Each one can be used
529 * to route different frame types to various inbound queues. We send broadcast/
530 * multicast/error frames to the default queue for slow handling,
531 * and CAM hit/RSS frames to the fast handling queues.
532 */
533static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
534 int enable)
535{
Ron Mercer8587ea32009-02-23 10:42:15 +0000536 int status = -EINVAL; /* Return error if no mask match. */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400537 u32 value = 0;
538
Joe Perchesae9540f2010-02-09 11:49:52 +0000539 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
540 "%s %s mask %s the routing reg.\n",
541 enable ? "Adding" : "Removing",
542 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
543 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
544 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
545 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
546 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
547 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
548 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
549 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
550 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
551 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
552 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
553 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
554 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
555 index == RT_IDX_UNUSED013 ? "UNUSED13" :
556 index == RT_IDX_UNUSED014 ? "UNUSED14" :
557 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
558 "(Bad index != RT_IDX)",
559 enable ? "to" : "from");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400560
561 switch (mask) {
562 case RT_IDX_CAM_HIT:
563 {
564 value = RT_IDX_DST_CAM_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
567 break;
568 }
569 case RT_IDX_VALID: /* Promiscuous Mode frames. */
570 {
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
574 break;
575 }
576 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
577 {
578 value = RT_IDX_DST_DFLT_Q | /* dest */
579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
581 break;
582 }
Ron Mercerfbc2ac32010-07-05 12:19:41 +0000583 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
584 {
585 value = RT_IDX_DST_DFLT_Q | /* dest */
586 RT_IDX_TYPE_NICQ | /* type */
587 (RT_IDX_IP_CSUM_ERR_SLOT <<
588 RT_IDX_IDX_SHIFT); /* index */
589 break;
590 }
591 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
592 {
593 value = RT_IDX_DST_DFLT_Q | /* dest */
594 RT_IDX_TYPE_NICQ | /* type */
595 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
596 RT_IDX_IDX_SHIFT); /* index */
597 break;
598 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400599 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
600 {
601 value = RT_IDX_DST_DFLT_Q | /* dest */
602 RT_IDX_TYPE_NICQ | /* type */
603 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
604 break;
605 }
606 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
607 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000608 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400609 RT_IDX_TYPE_NICQ | /* type */
610 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
611 break;
612 }
613 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
614 {
Ron Mercere163d7f2009-10-08 09:54:39 +0000615 value = RT_IDX_DST_DFLT_Q | /* dest */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400616 RT_IDX_TYPE_NICQ | /* type */
617 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
618 break;
619 }
620 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
621 {
622 value = RT_IDX_DST_RSS | /* dest */
623 RT_IDX_TYPE_NICQ | /* type */
624 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
625 break;
626 }
627 case 0: /* Clear the E-bit on an entry. */
628 {
629 value = RT_IDX_DST_DFLT_Q | /* dest */
630 RT_IDX_TYPE_NICQ | /* type */
631 (index << RT_IDX_IDX_SHIFT);/* index */
632 break;
633 }
634 default:
Joe Perchesae9540f2010-02-09 11:49:52 +0000635 netif_err(qdev, ifup, qdev->ndev,
636 "Mask type %d not yet supported.\n", mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400637 status = -EPERM;
638 goto exit;
639 }
640
641 if (value) {
642 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
643 if (status)
644 goto exit;
645 value |= (enable ? RT_IDX_E : 0);
646 ql_write32(qdev, RT_IDX, value);
647 ql_write32(qdev, RT_DATA, enable ? mask : 0);
648 }
649exit:
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400650 return status;
651}
652
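/* Usage sketch (illustrative; the real callers live further down in this
 * file, e.g. the routing/multicast set-up paths): enable the broadcast
 * slot so broadcast frames land on the default queue.
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *	if (status)
 *		return status;
 */
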
653static void ql_enable_interrupts(struct ql_adapter *qdev)
654{
655 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
656}
657
658static void ql_disable_interrupts(struct ql_adapter *qdev)
659{
660 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
661}
662
663/* If we're running with multiple MSI-X vectors then we enable on the fly.
664 * Otherwise, we may have multiple outstanding workers and don't want to
665 * enable until the last one finishes. In this case, the irq_cnt gets
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300666 * incremented every time we queue a worker and decremented every time
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400667 * a worker finishes. Once it hits zero we enable the interrupt.
668 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700669u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400670{
Ron Mercerbb0d2152008-10-20 10:30:26 -0700671 u32 var = 0;
672 unsigned long hw_flags = 0;
673 struct intr_context *ctx = qdev->intr_context + intr;
674
675 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
676 /* Always enable if we're MSIX multi interrupts and
677 * it's not the default (zeroeth) interrupt.
678 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400679 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700680 ctx->intr_en_mask);
681 var = ql_read32(qdev, STS);
682 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400683 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700684
685 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
686 if (atomic_dec_and_test(&ctx->irq_cnt)) {
687 ql_write32(qdev, INTR_EN,
688 ctx->intr_en_mask);
689 var = ql_read32(qdev, STS);
690 }
691 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
692 return var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400693}
694
695static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
696{
697 u32 var = 0;
Ron Mercerbb0d2152008-10-20 10:30:26 -0700698 struct intr_context *ctx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400699
Ron Mercerbb0d2152008-10-20 10:30:26 -0700700 /* HW disables for us if we're MSIX multi interrupts and
701 * it's not the default (zeroeth) interrupt.
702 */
703 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
704 return 0;
705
706 ctx = qdev->intr_context + intr;
Ron Mercer08b1bc82009-03-09 10:59:23 +0000707 spin_lock(&qdev->hw_lock);
Ron Mercerbb0d2152008-10-20 10:30:26 -0700708 if (!atomic_read(&ctx->irq_cnt)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400709 ql_write32(qdev, INTR_EN,
Ron Mercerbb0d2152008-10-20 10:30:26 -0700710 ctx->intr_dis_mask);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400711 var = ql_read32(qdev, STS);
712 }
Ron Mercerbb0d2152008-10-20 10:30:26 -0700713 atomic_inc(&ctx->irq_cnt);
Ron Mercer08b1bc82009-03-09 10:59:23 +0000714 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400715 return var;
716}
717
718static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
719{
720 int i;
721 for (i = 0; i < qdev->intr_count; i++) {
 722 /* The enable call does an atomic_dec_and_test
723 * and enables only if the result is zero.
724 * So we precharge it here.
725 */
Ron Mercerbb0d2152008-10-20 10:30:26 -0700726 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
727 i == 0))
728 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400729 ql_enable_completion_interrupt(qdev, i);
730 }
731
732}
733
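/* Usage sketch (illustrative): completion handlers typically bracket their
 * work with the disable/enable pair above so the per-vector irq_cnt
 * bookkeeping stays balanced.
 *
 *	ql_disable_completion_interrupt(qdev, intr);
 *	... service the completion queue ...
 *	ql_enable_completion_interrupt(qdev, intr);
 */
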
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000734static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
735{
736 int status, i;
737 u16 csum = 0;
738 __le16 *flash = (__le16 *)&qdev->flash;
739
740 status = strncmp((char *)&qdev->flash, str, 4);
741 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000742 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000743 return status;
744 }
745
746 for (i = 0; i < size; i++)
747 csum += le16_to_cpu(*flash++);
748
749 if (csum)
Joe Perchesae9540f2010-02-09 11:49:52 +0000750 netif_err(qdev, ifup, qdev->ndev,
751 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000752
753 return csum;
754}
755
Ron Mercer26351472009-02-02 13:53:57 -0800756static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400757{
758 int status = 0;
759 /* wait for reg to come ready */
760 status = ql_wait_reg_rdy(qdev,
761 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
762 if (status)
763 goto exit;
764 /* set up for reg read */
765 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
766 /* wait for reg to come ready */
767 status = ql_wait_reg_rdy(qdev,
768 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
769 if (status)
770 goto exit;
Ron Mercer26351472009-02-02 13:53:57 -0800771 /* This data is stored on flash as an array of
772 * __le32. Since ql_read32() returns cpu endian
773 * we need to swap it back.
774 */
775 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400776exit:
777 return status;
778}
779
Ron Mercercdca8d02009-03-02 08:07:31 +0000780static int ql_get_8000_flash_params(struct ql_adapter *qdev)
781{
782 u32 i, size;
783 int status;
784 __le32 *p = (__le32 *)&qdev->flash;
785 u32 offset;
Ron Mercer542512e2009-06-09 05:39:33 +0000786 u8 mac_addr[6];
Ron Mercercdca8d02009-03-02 08:07:31 +0000787
788 /* Get flash offset for function and adjust
789 * for dword access.
790 */
Ron Mercere4552f52009-06-09 05:39:32 +0000791 if (!qdev->port)
Ron Mercercdca8d02009-03-02 08:07:31 +0000792 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
793 else
794 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
795
796 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
797 return -ETIMEDOUT;
798
799 size = sizeof(struct flash_params_8000) / sizeof(u32);
800 for (i = 0; i < size; i++, p++) {
801 status = ql_read_flash_word(qdev, i+offset, p);
802 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000803 netif_err(qdev, ifup, qdev->ndev,
804 "Error reading flash.\n");
Ron Mercercdca8d02009-03-02 08:07:31 +0000805 goto exit;
806 }
807 }
808
809 status = ql_validate_flash(qdev,
810 sizeof(struct flash_params_8000) / sizeof(u16),
811 "8000");
812 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000813 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
Ron Mercercdca8d02009-03-02 08:07:31 +0000814 status = -EINVAL;
815 goto exit;
816 }
817
Ron Mercer542512e2009-06-09 05:39:33 +0000818 /* Extract either manufacturer or BOFM modified
819 * MAC address.
820 */
821 if (qdev->flash.flash_params_8000.data_type1 == 2)
822 memcpy(mac_addr,
823 qdev->flash.flash_params_8000.mac_addr1,
824 qdev->ndev->addr_len);
825 else
826 memcpy(mac_addr,
827 qdev->flash.flash_params_8000.mac_addr,
828 qdev->ndev->addr_len);
829
830 if (!is_valid_ether_addr(mac_addr)) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000831 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
Ron Mercercdca8d02009-03-02 08:07:31 +0000832 status = -EINVAL;
833 goto exit;
834 }
835
836 memcpy(qdev->ndev->dev_addr,
Ron Mercer542512e2009-06-09 05:39:33 +0000837 mac_addr,
Ron Mercercdca8d02009-03-02 08:07:31 +0000838 qdev->ndev->addr_len);
839
840exit:
841 ql_sem_unlock(qdev, SEM_FLASH_MASK);
842 return status;
843}
844
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000845static int ql_get_8012_flash_params(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400846{
847 int i;
848 int status;
Ron Mercer26351472009-02-02 13:53:57 -0800849 __le32 *p = (__le32 *)&qdev->flash;
Ron Mercere78f5fa2009-02-02 13:54:15 -0800850 u32 offset = 0;
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000851 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
Ron Mercere78f5fa2009-02-02 13:54:15 -0800852
853 /* Second function's parameters follow the first
854 * function's.
855 */
Ron Mercere4552f52009-06-09 05:39:32 +0000856 if (qdev->port)
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000857 offset = size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400858
859 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
860 return -ETIMEDOUT;
861
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000862 for (i = 0; i < size; i++, p++) {
Ron Mercere78f5fa2009-02-02 13:54:15 -0800863 status = ql_read_flash_word(qdev, i+offset, p);
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400864 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000865 netif_err(qdev, ifup, qdev->ndev,
866 "Error reading flash.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400867 goto exit;
868 }
869
870 }
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000871
872 status = ql_validate_flash(qdev,
873 sizeof(struct flash_params_8012) / sizeof(u16),
874 "8012");
875 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000876 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000877 status = -EINVAL;
878 goto exit;
879 }
880
881 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
882 status = -EINVAL;
883 goto exit;
884 }
885
886 memcpy(qdev->ndev->dev_addr,
887 qdev->flash.flash_params_8012.mac_addr,
888 qdev->ndev->addr_len);
889
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400890exit:
891 ql_sem_unlock(qdev, SEM_FLASH_MASK);
892 return status;
893}
894
895/* xgmac register are located behind the xgmac_addr and xgmac_data
896 * register pair. Each read/write requires us to wait for the ready
897 * bit before reading/writing the data.
898 */
899static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
900{
901 int status;
902 /* wait for reg to come ready */
903 status = ql_wait_reg_rdy(qdev,
904 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
905 if (status)
906 return status;
907 /* write the data to the data reg */
908 ql_write32(qdev, XGMAC_DATA, data);
909 /* trigger the write */
910 ql_write32(qdev, XGMAC_ADDR, reg);
911 return status;
912}
913
 914/* xgmac registers are located behind the xgmac_addr and xgmac_data
915 * register pair. Each read/write requires us to wait for the ready
916 * bit before reading/writing the data.
917 */
918int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
919{
920 int status = 0;
921 /* wait for reg to come ready */
922 status = ql_wait_reg_rdy(qdev,
923 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
924 if (status)
925 goto exit;
926 /* set up for reg read */
927 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
928 /* wait for reg to come ready */
929 status = ql_wait_reg_rdy(qdev,
930 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
931 if (status)
932 goto exit;
933 /* get the data */
934 *data = ql_read32(qdev, XGMAC_DATA);
935exit:
936 return status;
937}
938
939/* This is used for reading the 64-bit statistics regs. */
940int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
941{
942 int status = 0;
943 u32 hi = 0;
944 u32 lo = 0;
945
946 status = ql_read_xgmac_reg(qdev, reg, &lo);
947 if (status)
948 goto exit;
949
950 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
951 if (status)
952 goto exit;
953
954 *data = (u64) lo | ((u64) hi << 32);
955
956exit:
957 return status;
958}
959
Ron Mercercdca8d02009-03-02 08:07:31 +0000960static int ql_8000_port_initialize(struct ql_adapter *qdev)
961{
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +0000962 int status;
Ron Mercercfec0cb2009-06-09 05:39:29 +0000963 /*
964 * Get MPI firmware version for driver banner
 965 * and ethtool info.
966 */
967 status = ql_mb_about_fw(qdev);
968 if (status)
969 goto exit;
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +0000970 status = ql_mb_get_fw_state(qdev);
971 if (status)
972 goto exit;
973 /* Wake up a worker to get/set the TX/RX frame sizes. */
974 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
975exit:
976 return status;
Ron Mercercdca8d02009-03-02 08:07:31 +0000977}
978
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400979/* Take the MAC Core out of reset.
980 * Enable statistics counting.
981 * Take the transmitter/receiver out of reset.
982 * This functionality may be done in the MPI firmware at a
983 * later date.
984 */
Ron Mercerb0c2aad2009-02-26 10:08:35 +0000985static int ql_8012_port_initialize(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400986{
987 int status = 0;
988 u32 data;
989
990 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
991 /* Another function has the semaphore, so
992 * wait for the port init bit to come ready.
993 */
Joe Perchesae9540f2010-02-09 11:49:52 +0000994 netif_info(qdev, link, qdev->ndev,
995 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -0400996 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
997 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +0000998 netif_crit(qdev, link, qdev->ndev,
999 "Port initialize timed out.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001000 }
1001 return status;
1002 }
1003
Joe Perchesae9540f2010-02-09 11:49:52 +00001004 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001005 /* Set the core reset. */
1006 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1007 if (status)
1008 goto end;
1009 data |= GLOBAL_CFG_RESET;
1010 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1011 if (status)
1012 goto end;
1013
1014 /* Clear the core reset and turn on jumbo for receiver. */
1015 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1016 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1017 data |= GLOBAL_CFG_TX_STAT_EN;
1018 data |= GLOBAL_CFG_RX_STAT_EN;
1019 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1020 if (status)
1021 goto end;
1022
 1023 /* Enable the transmitter and clear its reset. */
1024 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1025 if (status)
1026 goto end;
1027 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1028 data |= TX_CFG_EN; /* Enable the transmitter. */
1029 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1030 if (status)
1031 goto end;
1032
 1033 /* Enable the receiver and clear its reset. */
1034 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1035 if (status)
1036 goto end;
1037 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1038 data |= RX_CFG_EN; /* Enable the receiver. */
1039 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1040 if (status)
1041 goto end;
1042
1043 /* Turn on jumbo. */
1044 status =
1045 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1046 if (status)
1047 goto end;
1048 status =
1049 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1050 if (status)
1051 goto end;
1052
1053 /* Signal to the world that the port is enabled. */
1054 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1055end:
1056 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1057 return status;
1058}
1059
Ron Mercer7c734352009-10-19 03:32:19 +00001060static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1061{
1062 return PAGE_SIZE << qdev->lbq_buf_order;
1063}
1064
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001065/* Get the next large buffer. */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001066static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001067{
1068 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1069 rx_ring->lbq_curr_idx++;
1070 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1071 rx_ring->lbq_curr_idx = 0;
1072 rx_ring->lbq_free_cnt++;
1073 return lbq_desc;
1074}
1075
Ron Mercer7c734352009-10-19 03:32:19 +00001076static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1077 struct rx_ring *rx_ring)
1078{
1079 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1080
1081 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001082 dma_unmap_addr(lbq_desc, mapaddr),
Ron Mercer7c734352009-10-19 03:32:19 +00001083 rx_ring->lbq_buf_size,
1084 PCI_DMA_FROMDEVICE);
1085
1086 /* If it's the last chunk of our master page then
1087 * we unmap it.
1088 */
1089 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1090 == ql_lbq_block_size(qdev))
1091 pci_unmap_page(qdev->pdev,
1092 lbq_desc->p.pg_chunk.map,
1093 ql_lbq_block_size(qdev),
1094 PCI_DMA_FROMDEVICE);
1095 return lbq_desc;
1096}
1097
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001098/* Get the next small buffer. */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001099static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001100{
1101 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1102 rx_ring->sbq_curr_idx++;
1103 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1104 rx_ring->sbq_curr_idx = 0;
1105 rx_ring->sbq_free_cnt++;
1106 return sbq_desc;
1107}
1108
1109/* Update an rx ring index. */
1110static void ql_update_cq(struct rx_ring *rx_ring)
1111{
1112 rx_ring->cnsmr_idx++;
1113 rx_ring->curr_entry++;
1114 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1115 rx_ring->cnsmr_idx = 0;
1116 rx_ring->curr_entry = rx_ring->cq_base;
1117 }
1118}
1119
1120static void ql_write_cq_idx(struct rx_ring *rx_ring)
1121{
1122 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1123}
1124
Ron Mercer7c734352009-10-19 03:32:19 +00001125static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1126 struct bq_desc *lbq_desc)
1127{
1128 if (!rx_ring->pg_chunk.page) {
1129 u64 map;
1130 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1131 GFP_ATOMIC,
1132 qdev->lbq_buf_order);
1133 if (unlikely(!rx_ring->pg_chunk.page)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001134 netif_err(qdev, drv, qdev->ndev,
1135 "page allocation failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00001136 return -ENOMEM;
1137 }
1138 rx_ring->pg_chunk.offset = 0;
1139 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1140 0, ql_lbq_block_size(qdev),
1141 PCI_DMA_FROMDEVICE);
1142 if (pci_dma_mapping_error(qdev->pdev, map)) {
1143 __free_pages(rx_ring->pg_chunk.page,
1144 qdev->lbq_buf_order);
Joe Perchesae9540f2010-02-09 11:49:52 +00001145 netif_err(qdev, drv, qdev->ndev,
1146 "PCI mapping failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00001147 return -ENOMEM;
1148 }
1149 rx_ring->pg_chunk.map = map;
1150 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1151 }
1152
1153 /* Copy the current master pg_chunk info
1154 * to the current descriptor.
1155 */
1156 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1157
1158 /* Adjust the master page chunk for next
1159 * buffer get.
1160 */
1161 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1162 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1163 rx_ring->pg_chunk.page = NULL;
1164 lbq_desc->p.pg_chunk.last_flag = 1;
1165 } else {
1166 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1167 get_page(rx_ring->pg_chunk.page);
1168 lbq_desc->p.pg_chunk.last_flag = 0;
1169 }
1170 return 0;
1171}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001172/* Process (refill) a large buffer queue. */
1173static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1174{
Ron Mercer49f21862009-02-23 10:42:16 +00001175 u32 clean_idx = rx_ring->lbq_clean_idx;
1176 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001177 struct bq_desc *lbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001178 u64 map;
1179 int i;
1180
Ron Mercer7c734352009-10-19 03:32:19 +00001181 while (rx_ring->lbq_free_cnt > 32) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001182 for (i = 0; i < 16; i++) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184 "lbq: try cleaning clean_idx = %d.\n",
1185 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001186 lbq_desc = &rx_ring->lbq[clean_idx];
Ron Mercer7c734352009-10-19 03:32:19 +00001187 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001188 netif_err(qdev, ifup, qdev->ndev,
1189 "Could not get a page chunk.\n");
1190 return;
1191 }
Ron Mercer7c734352009-10-19 03:32:19 +00001192
1193 map = lbq_desc->p.pg_chunk.map +
1194 lbq_desc->p.pg_chunk.offset;
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001195 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1196 dma_unmap_len_set(lbq_desc, maplen,
Ron Mercer7c734352009-10-19 03:32:19 +00001197 rx_ring->lbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001198 *lbq_desc->addr = cpu_to_le64(map);
Ron Mercer7c734352009-10-19 03:32:19 +00001199
1200 pci_dma_sync_single_for_device(qdev->pdev, map,
1201 rx_ring->lbq_buf_size,
1202 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001203 clean_idx++;
1204 if (clean_idx == rx_ring->lbq_len)
1205 clean_idx = 0;
1206 }
1207
1208 rx_ring->lbq_clean_idx = clean_idx;
1209 rx_ring->lbq_prod_idx += 16;
1210 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1211 rx_ring->lbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001212 rx_ring->lbq_free_cnt -= 16;
1213 }
1214
1215 if (start_idx != clean_idx) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001216 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1217 "lbq: updating prod idx = %d.\n",
1218 rx_ring->lbq_prod_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001219 ql_write_db_reg(rx_ring->lbq_prod_idx,
1220 rx_ring->lbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001221 }
1222}
1223
1224/* Process (refill) a small buffer queue. */
1225static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1226{
Ron Mercer49f21862009-02-23 10:42:16 +00001227 u32 clean_idx = rx_ring->sbq_clean_idx;
1228 u32 start_idx = clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001229 struct bq_desc *sbq_desc;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001230 u64 map;
1231 int i;
1232
1233 while (rx_ring->sbq_free_cnt > 16) {
1234 for (i = 0; i < 16; i++) {
1235 sbq_desc = &rx_ring->sbq[clean_idx];
Joe Perchesae9540f2010-02-09 11:49:52 +00001236 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1237 "sbq: try cleaning clean_idx = %d.\n",
1238 clean_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001239 if (sbq_desc->p.skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001240 netif_printk(qdev, rx_status, KERN_DEBUG,
1241 qdev->ndev,
1242 "sbq: getting new skb for index %d.\n",
1243 sbq_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001244 sbq_desc->p.skb =
1245 netdev_alloc_skb(qdev->ndev,
Ron Mercer52e55f32009-10-10 09:35:07 +00001246 SMALL_BUFFER_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001247 if (sbq_desc->p.skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001248 netif_err(qdev, probe, qdev->ndev,
1249 "Couldn't get an skb.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001250 rx_ring->sbq_clean_idx = clean_idx;
1251 return;
1252 }
1253 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1254 map = pci_map_single(qdev->pdev,
1255 sbq_desc->p.skb->data,
Ron Mercer52e55f32009-10-10 09:35:07 +00001256 rx_ring->sbq_buf_size,
1257 PCI_DMA_FROMDEVICE);
Ron Mercerc907a352009-01-04 17:06:46 -08001258 if (pci_dma_mapping_error(qdev->pdev, map)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001259 netif_err(qdev, ifup, qdev->ndev,
1260 "PCI mapping failed.\n");
Ron Mercerc907a352009-01-04 17:06:46 -08001261 rx_ring->sbq_clean_idx = clean_idx;
Ron Mercer06a3d512009-02-12 16:37:48 -08001262 dev_kfree_skb_any(sbq_desc->p.skb);
1263 sbq_desc->p.skb = NULL;
Ron Mercerc907a352009-01-04 17:06:46 -08001264 return;
1265 }
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001266 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1267 dma_unmap_len_set(sbq_desc, maplen,
Ron Mercer52e55f32009-10-10 09:35:07 +00001268 rx_ring->sbq_buf_size);
Ron Mercer2c9a0d42009-01-05 18:19:20 -08001269 *sbq_desc->addr = cpu_to_le64(map);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001270 }
1271
1272 clean_idx++;
1273 if (clean_idx == rx_ring->sbq_len)
1274 clean_idx = 0;
1275 }
1276 rx_ring->sbq_clean_idx = clean_idx;
1277 rx_ring->sbq_prod_idx += 16;
1278 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1279 rx_ring->sbq_prod_idx = 0;
Ron Mercer49f21862009-02-23 10:42:16 +00001280 rx_ring->sbq_free_cnt -= 16;
1281 }
1282
1283 if (start_idx != clean_idx) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001284 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1285 "sbq: updating prod idx = %d.\n",
1286 rx_ring->sbq_prod_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001287 ql_write_db_reg(rx_ring->sbq_prod_idx,
1288 rx_ring->sbq_prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001289 }
1290}
1291
1292static void ql_update_buffer_queues(struct ql_adapter *qdev,
1293 struct rx_ring *rx_ring)
1294{
1295 ql_update_sbq(qdev, rx_ring);
1296 ql_update_lbq(qdev, rx_ring);
1297}
1298
1299/* Unmaps tx buffers. Can be called from send() if a pci mapping
1300 * fails at some stage, or from the interrupt when a tx completes.
1301 */
1302static void ql_unmap_send(struct ql_adapter *qdev,
1303 struct tx_ring_desc *tx_ring_desc, int mapped)
1304{
1305 int i;
1306 for (i = 0; i < mapped; i++) {
1307 if (i == 0 || (i == 7 && mapped > 7)) {
1308 /*
1309 * Unmap the skb->data area, or the
1310 * external sglist (AKA the Outbound
1311 * Address List (OAL)).
 1312 * If it's the zeroth element, then it's
 1313 * the skb->data area. If it's the 7th
 1314 * element and there are more than 6 frags,
 1315 * then it's an OAL.
1316 */
1317 if (i == 7) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001318 netif_printk(qdev, tx_done, KERN_DEBUG,
1319 qdev->ndev,
1320 "unmapping OAL area.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001321 }
1322 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001323 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001324 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001325 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001326 maplen),
1327 PCI_DMA_TODEVICE);
1328 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00001329 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1330 "unmapping frag %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001331 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001332 dma_unmap_addr(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001333 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001334 dma_unmap_len(&tx_ring_desc->map[i],
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001335 maplen), PCI_DMA_TODEVICE);
1336 }
1337 }
1338
1339}
1340
1341/* Map the buffers for this transmit. This will return
1342 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1343 */
1344static int ql_map_send(struct ql_adapter *qdev,
1345 struct ob_mac_iocb_req *mac_iocb_ptr,
1346 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1347{
1348 int len = skb_headlen(skb);
1349 dma_addr_t map;
1350 int frag_idx, err, map_idx = 0;
1351 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1352 int frag_cnt = skb_shinfo(skb)->nr_frags;
1353
1354 if (frag_cnt) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001355 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1356 "frag_cnt = %d.\n", frag_cnt);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001357 }
1358 /*
1359 * Map the skb buffer first.
1360 */
1361 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1362
1363 err = pci_dma_mapping_error(qdev->pdev, map);
1364 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001365 netif_err(qdev, tx_queued, qdev->ndev,
1366 "PCI mapping failed with error: %d\n", err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001367
1368 return NETDEV_TX_BUSY;
1369 }
1370
1371 tbd->len = cpu_to_le32(len);
1372 tbd->addr = cpu_to_le64(map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001373 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1374 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001375 map_idx++;
1376
1377 /*
1378 * This loop fills the remainder of the 8 address descriptors
1379 * in the IOCB. If there are more than 7 fragments, then the
1380 * eighth address desc will point to an external list (OAL).
1381 * When this happens, the remainder of the frags will be stored
1382 * in this list.
1383 */
1384 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1385 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1386 tbd++;
1387 if (frag_idx == 6 && frag_cnt > 7) {
1388 /* Let's tack on an sglist.
1389 * Our control block will now
1390 * look like this:
1391 * iocb->seg[0] = skb->data
1392 * iocb->seg[1] = frag[0]
1393 * iocb->seg[2] = frag[1]
1394 * iocb->seg[3] = frag[2]
1395 * iocb->seg[4] = frag[3]
1396 * iocb->seg[5] = frag[4]
1397 * iocb->seg[6] = frag[5]
1398 * iocb->seg[7] = ptr to OAL (external sglist)
1399 * oal->seg[0] = frag[6]
1400 * oal->seg[1] = frag[7]
1401 * oal->seg[2] = frag[8]
1402 * oal->seg[3] = frag[9]
1403 * oal->seg[4] = frag[10]
1404 * etc...
1405 */
1406 /* Tack on the OAL in the eighth segment of IOCB. */
1407 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1408 sizeof(struct oal),
1409 PCI_DMA_TODEVICE);
1410 err = pci_dma_mapping_error(qdev->pdev, map);
1411 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001412 netif_err(qdev, tx_queued, qdev->ndev,
1413 "PCI mapping outbound address list with error: %d\n",
1414 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001415 goto map_error;
1416 }
1417
1418 tbd->addr = cpu_to_le64(map);
1419 /*
 1420 * The length is the number of fragments
 1421 * that remain to be mapped times the size of
 1422 * one OAL entry (struct tx_buf_desc).
1423 */
1424 tbd->len =
1425 cpu_to_le32((sizeof(struct tx_buf_desc) *
1426 (frag_cnt - frag_idx)) | TX_DESC_C);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001427 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001428 map);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001429 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001430 sizeof(struct oal));
1431 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1432 map_idx++;
1433 }
1434
1435 map =
1436 pci_map_page(qdev->pdev, frag->page,
1437 frag->page_offset, frag->size,
1438 PCI_DMA_TODEVICE);
1439
1440 err = pci_dma_mapping_error(qdev->pdev, map);
1441 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001442 netif_err(qdev, tx_queued, qdev->ndev,
1443 "PCI mapping frags failed with error: %d.\n",
1444 err);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001445 goto map_error;
1446 }
1447
1448 tbd->addr = cpu_to_le64(map);
1449 tbd->len = cpu_to_le32(frag->size);
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001450 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1451 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001452 frag->size);
1453
1454 }
1455 /* Save the number of segments we've mapped. */
1456 tx_ring_desc->map_cnt = map_idx;
1457 /* Terminate the last segment. */
1458 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1459 return NETDEV_TX_OK;
1460
1461map_error:
1462 /*
1463 * If the first frag mapping failed, then i will be zero.
1464 * This causes the unmap of the skb->data area. Otherwise
1465 * we pass in the number of frags that mapped successfully
 1466 * so they can be unmapped.
1467 */
1468 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1469 return NETDEV_TX_BUSY;
1470}
1471
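/* Usage sketch (illustrative): the transmit path (the ndo_start_xmit
 * handler later in this file) calls ql_map_send() after filling in the
 * IOCB header and simply returns NETDEV_TX_BUSY if the mapping fails,
 * since ql_map_send() has already unwound any partial mappings.
 *
 *	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
 *			NETDEV_TX_OK)
 *		return NETDEV_TX_BUSY;
 */
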
Ron Mercer4f848c02010-01-02 10:37:43 +00001472/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001473static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1474 struct rx_ring *rx_ring,
1475 struct ib_mac_iocb_rsp *ib_mac_rsp,
1476 u32 length,
1477 u16 vlan_id)
1478{
1479 struct sk_buff *skb;
1480 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1481 struct skb_frag_struct *rx_frag;
1482 int nr_frags;
1483 struct napi_struct *napi = &rx_ring->napi;
1484
1485 napi->dev = qdev->ndev;
1486
1487 skb = napi_get_frags(napi);
1488 if (!skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001489 netif_err(qdev, drv, qdev->ndev,
1490 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001491 rx_ring->rx_dropped++;
1492 put_page(lbq_desc->p.pg_chunk.page);
1493 return;
1494 }
1495 prefetch(lbq_desc->p.pg_chunk.va);
1496 rx_frag = skb_shinfo(skb)->frags;
1497 nr_frags = skb_shinfo(skb)->nr_frags;
1498 rx_frag += nr_frags;
1499 rx_frag->page = lbq_desc->p.pg_chunk.page;
1500 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1501 rx_frag->size = length;
1502
1503 skb->len += length;
1504 skb->data_len += length;
1505 skb->truesize += length;
1506 skb_shinfo(skb)->nr_frags++;
1507
1508 rx_ring->rx_packets++;
1509 rx_ring->rx_bytes += length;
1510 skb->ip_summed = CHECKSUM_UNNECESSARY;
1511 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001512 if (vlan_id != 0xffff)
1513 __vlan_hwaccel_put_tag(skb, vlan_id);
1514 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001515}
1516
1517/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001518static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1519 struct rx_ring *rx_ring,
1520 struct ib_mac_iocb_rsp *ib_mac_rsp,
1521 u32 length,
1522 u16 vlan_id)
1523{
1524 struct net_device *ndev = qdev->ndev;
1525 struct sk_buff *skb = NULL;
1526 void *addr;
1527 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1528 struct napi_struct *napi = &rx_ring->napi;
1529
1530 skb = netdev_alloc_skb(ndev, length);
1531 if (!skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001532 netif_err(qdev, drv, qdev->ndev,
1533 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001534 rx_ring->rx_dropped++;
1535 put_page(lbq_desc->p.pg_chunk.page);
1536 return;
1537 }
1538
1539 addr = lbq_desc->p.pg_chunk.va;
1540 prefetch(addr);
1541
1542
1543 /* Frame error, so drop the packet. */
1544 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001545 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00001546 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001547 rx_ring->rx_errors++;
1548 goto err_out;
1549 }
1550
1551 /* The max framesize filter on this chip is set higher than
1552 * MTU since FCoE uses 2k frames.
1553 */
 1554	if (length > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001555 netif_err(qdev, drv, qdev->ndev,
1556 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001557 rx_ring->rx_dropped++;
1558 goto err_out;
1559 }
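	/* Copy the Ethernet header into the skb's linear area and hang
	 * the rest of the page chunk off it as a page fragment below.
	 */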
1560 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f2010-02-09 11:49:52 +00001561 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1563 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001564 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1565 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1566 length-ETH_HLEN);
1567 skb->len += length-ETH_HLEN;
1568 skb->data_len += length-ETH_HLEN;
1569 skb->truesize += length-ETH_HLEN;
1570
1571 rx_ring->rx_packets++;
1572 rx_ring->rx_bytes += skb->len;
1573 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001574 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001575
Michał Mirosław88230fd2011-04-18 13:31:21 +00001576 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001577 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1578 /* TCP frame. */
1579 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001580 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1584 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1585 /* Unfragmented ipv4 UDP frame. */
1586 struct iphdr *iph = (struct iphdr *) skb->data;
1587 if (!(iph->frag_off &
1588 cpu_to_be16(IP_MF|IP_OFFSET))) {
1589 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00001590 netif_printk(qdev, rx_status, KERN_DEBUG,
1591 qdev->ndev,
1592 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001593 }
1594 }
1595 }
1596
1597 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001598 if (vlan_id != 0xffff)
1599 __vlan_hwaccel_put_tag(skb, vlan_id);
1600 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1601 napi_gro_receive(napi, skb);
1602 else
1603 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001604 return;
1605err_out:
1606 dev_kfree_skb_any(skb);
1607 put_page(lbq_desc->p.pg_chunk.page);
1608}
1609
1610/* Process an inbound completion from an rx ring. */
1611static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1612 struct rx_ring *rx_ring,
1613 struct ib_mac_iocb_rsp *ib_mac_rsp,
1614 u32 length,
1615 u16 vlan_id)
1616{
1617 struct net_device *ndev = qdev->ndev;
1618 struct sk_buff *skb = NULL;
1619 struct sk_buff *new_skb = NULL;
1620 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1621
1622 skb = sbq_desc->p.skb;
1623 /* Allocate new_skb and copy */
1624 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1625 if (new_skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001626 netif_err(qdev, probe, qdev->ndev,
1627 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001628 rx_ring->rx_dropped++;
1629 return;
1630 }
1631 skb_reserve(new_skb, NET_IP_ALIGN);
1632 memcpy(skb_put(new_skb, length), skb->data, length);
1633 skb = new_skb;
1634
1635 /* Frame error, so drop the packet. */
1636 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001637 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00001638 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_errors++;
1641 return;
1642 }
1643
1644 /* loopback self test for ethtool */
1645 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1646 ql_check_lb_frame(qdev, skb);
1647 dev_kfree_skb_any(skb);
1648 return;
1649 }
1650
1651 /* The max framesize filter on this chip is set higher than
1652 * MTU since FCoE uses 2k frames.
1653 */
1654 if (skb->len > ndev->mtu + ETH_HLEN) {
1655 dev_kfree_skb_any(skb);
1656 rx_ring->rx_dropped++;
1657 return;
1658 }
1659
1660 prefetch(skb->data);
1661 skb->dev = ndev;
1662 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001663 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1664 "%s Multicast.\n",
1665 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1666 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1667 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1668 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1669 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001671 }
1672 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f2010-02-09 11:49:52 +00001673 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1674 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001675
1676 rx_ring->rx_packets++;
1677 rx_ring->rx_bytes += skb->len;
1678 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001679 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001680
1681 /* If rx checksum is on, and there are no
1682 * csum or frame errors.
1683 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001684 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001685 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1686 /* TCP frame. */
1687 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001690 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1692 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1693 /* Unfragmented ipv4 UDP frame. */
1694 struct iphdr *iph = (struct iphdr *) skb->data;
1695 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00001696 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001697 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00001698 netif_printk(qdev, rx_status, KERN_DEBUG,
1699 qdev->ndev,
1700 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001701 }
1702 }
1703 }
1704
1705 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001706 if (vlan_id != 0xffff)
1707 __vlan_hwaccel_put_tag(skb, vlan_id);
1708 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1709 napi_gro_receive(&rx_ring->napi, skb);
1710 else
1711 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001712}
1713
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001714static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001715{
1716 void *temp_addr = skb->data;
1717
1718 /* Undo the skb_reserve(skb,32) we did before
1719 * giving to hardware, and realign data on
1720 * a 2-byte boundary.
1721 */
1722 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1723 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1724 skb_copy_to_linear_data(skb, temp_addr,
1725 (unsigned int)len);
1726}
1727
1728/*
1729 * This function builds an skb for the given inbound
1730 * completion. It will be rewritten for readability in the near
1731 * future, but for not it works well.
1732 */
1733static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1734 struct rx_ring *rx_ring,
1735 struct ib_mac_iocb_rsp *ib_mac_rsp)
1736{
1737 struct bq_desc *lbq_desc;
1738 struct bq_desc *sbq_desc;
1739 struct sk_buff *skb = NULL;
1740 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1741 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1742
1743 /*
1744 * Handle the header buffer if present.
1745 */
1746 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1747 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001748 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1749 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001750 /*
1751 * Headers fit nicely into a small buffer.
1752 */
1753 sbq_desc = ql_get_curr_sbuf(rx_ring);
1754 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001755 dma_unmap_addr(sbq_desc, mapaddr),
1756 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001757 PCI_DMA_FROMDEVICE);
1758 skb = sbq_desc->p.skb;
1759 ql_realign_skb(skb, hdr_len);
1760 skb_put(skb, hdr_len);
1761 sbq_desc->p.skb = NULL;
1762 }
1763
1764 /*
1765 * Handle the data buffer(s).
1766 */
1767 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f2010-02-09 11:49:52 +00001768 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001770 return skb;
1771 }
1772
1773 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1774 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001775 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776 "Headers in small, data of %d bytes in small, combine them.\n",
1777 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001778 /*
1779 * Data is less than small buffer size so it's
1780 * stuffed in a small buffer.
1781 * For this case we append the data
1782 * from the "data" small buffer to the "header" small
1783 * buffer.
1784 */
1785 sbq_desc = ql_get_curr_sbuf(rx_ring);
1786 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001787 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001788 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001789 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001790 (sbq_desc, maplen),
1791 PCI_DMA_FROMDEVICE);
1792 memcpy(skb_put(skb, length),
1793 sbq_desc->p.skb->data, length);
1794 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001795 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001796 (sbq_desc,
1797 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001798 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001799 (sbq_desc,
1800 maplen),
1801 PCI_DMA_FROMDEVICE);
1802 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00001803 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804 "%d bytes in a single small buffer.\n",
1805 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001806 sbq_desc = ql_get_curr_sbuf(rx_ring);
1807 skb = sbq_desc->p.skb;
1808 ql_realign_skb(skb, length);
1809 skb_put(skb, length);
1810 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001811 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001812 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001813 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001814 maplen),
1815 PCI_DMA_FROMDEVICE);
1816 sbq_desc->p.skb = NULL;
1817 }
1818 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1819 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001820 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 "Header in small, %d bytes in large. Chain large to small!\n",
1822 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001823 /*
1824 * The data is in a single large buffer. We
1825 * chain it to the header buffer's skb and let
1826 * it rip.
1827 */
Ron Mercer7c734352009-10-19 03:32:19 +00001828 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f2010-02-09 11:49:52 +00001829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "Chaining page at offset = %d, for %d bytes to skb.\n",
1831 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001832 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1833 lbq_desc->p.pg_chunk.offset,
1834 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001835 skb->len += length;
1836 skb->data_len += length;
1837 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001838 } else {
1839 /*
1840 * The headers and data are in a single large buffer. We
1841 * copy it to a new skb and let it go. This can happen with
1842 * jumbo mtu on a non-TCP/UDP frame.
1843 */
Ron Mercer7c734352009-10-19 03:32:19 +00001844 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001845 skb = netdev_alloc_skb(qdev->ndev, length);
1846 if (skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001847 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1848 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001849 return NULL;
1850 }
Ron Mercer4055c7d2009-01-04 17:07:09 -08001851 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001852 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d2009-01-04 17:07:09 -08001853 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001854 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d2009-01-04 17:07:09 -08001855 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001856 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f2010-02-09 11:49:52 +00001857 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1858 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1859 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001860 skb_fill_page_desc(skb, 0,
1861 lbq_desc->p.pg_chunk.page,
1862 lbq_desc->p.pg_chunk.offset,
1863 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001864 skb->len += length;
1865 skb->data_len += length;
1866 skb->truesize += length;
1867 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001868 __pskb_pull_tail(skb,
1869 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1870 VLAN_ETH_HLEN : ETH_HLEN);
1871 }
1872 } else {
1873 /*
1874 * The data is in a chain of large buffers
1875 * pointed to by a small buffer. We loop
 1876		 * through and chain them to our small header
1877 * buffer's skb.
1878 * frags: There are 18 max frags and our small
1879 * buffer will hold 32 of them. The thing is,
1880 * we'll use 3 max for our 9000 byte jumbo
1881 * frames. If the MTU goes up we could
1882 * eventually be in trouble.
1883 */
Ron Mercer7c734352009-10-19 03:32:19 +00001884 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001885 sbq_desc = ql_get_curr_sbuf(rx_ring);
1886 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001887 dma_unmap_addr(sbq_desc, mapaddr),
1888 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001889 PCI_DMA_FROMDEVICE);
1890 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1891 /*
 1892			 * This is a non-TCP/UDP IP frame, so
1893 * the headers aren't split into a small
1894 * buffer. We have to use the small buffer
1895 * that contains our sg list as our skb to
1896 * send upstairs. Copy the sg list here to
1897 * a local buffer and use it to find the
1898 * pages to chain.
1899 */
Joe Perchesae9540f2010-02-09 11:49:52 +00001900 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1901 "%d bytes of headers & data in chain of large.\n",
1902 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001903 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001904 sbq_desc->p.skb = NULL;
1905 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001906 }
1907 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001908 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1909 size = (length < rx_ring->lbq_buf_size) ? length :
1910 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001911
Joe Perchesae9540f2010-02-09 11:49:52 +00001912 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1913 "Adding page %d to skb for %d bytes.\n",
1914 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001915 skb_fill_page_desc(skb, i,
1916 lbq_desc->p.pg_chunk.page,
1917 lbq_desc->p.pg_chunk.offset,
1918 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001919 skb->len += size;
1920 skb->data_len += size;
1921 skb->truesize += size;
1922 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001923 i++;
1924 }
1925 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1926 VLAN_ETH_HLEN : ETH_HLEN);
1927 }
1928 return skb;
1929}
1930
1931/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001932static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001933 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001934 struct ib_mac_iocb_rsp *ib_mac_rsp,
1935 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001936{
1937 struct net_device *ndev = qdev->ndev;
1938 struct sk_buff *skb = NULL;
1939
1940 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1941
1942 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1943 if (unlikely(!skb)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001944 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1945 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001946 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001947 return;
1948 }
1949
Ron Mercera32959c2009-06-09 05:39:27 +00001950 /* Frame error, so drop the packet. */
1951 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Ron Mercer3b11d362010-07-05 12:19:39 +00001952 netif_info(qdev, drv, qdev->ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00001953 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001954 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001955 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001956 return;
1957 }
Ron Mercerec33a492009-06-09 05:39:28 +00001958
1959 /* The max framesize filter on this chip is set higher than
1960 * MTU since FCoE uses 2k frames.
1961 */
1962 if (skb->len > ndev->mtu + ETH_HLEN) {
1963 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001964 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001965 return;
1966 }
1967
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001968 /* loopback self test for ethtool */
1969 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1970 ql_check_lb_frame(qdev, skb);
1971 dev_kfree_skb_any(skb);
1972 return;
1973 }
1974
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001975 prefetch(skb->data);
1976 skb->dev = ndev;
1977 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1979 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1980 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1981 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1982 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1983 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1984 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001985 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001986 }
1987 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001988 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1989 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001990 }
Ron Mercerd555f592009-03-09 10:59:19 +00001991
Ron Mercerd555f592009-03-09 10:59:19 +00001992 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001993 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00001994
1995 /* If rx checksum is on, and there are no
1996 * csum or frame errors.
1997 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001998 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00001999 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2000 /* TCP frame. */
2001 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002004 skb->ip_summed = CHECKSUM_UNNECESSARY;
2005 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2006 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2007 /* Unfragmented ipv4 UDP frame. */
2008 struct iphdr *iph = (struct iphdr *) skb->data;
2009 if (!(iph->frag_off &
Ron Mercer6d29b1e2010-07-05 12:19:40 +00002010 ntohs(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002011 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00002012 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2013 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002014 }
2015 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002016 }
Ron Mercerd555f592009-03-09 10:59:19 +00002017
Ron Mercer885ee392009-11-03 13:49:31 +00002018 rx_ring->rx_packets++;
2019 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002020 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002021 if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2022 __vlan_hwaccel_put_tag(skb, vlan_id);
2023 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2024 napi_gro_receive(&rx_ring->napi, skb);
2025 else
2026 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002027}
2028
Ron Mercer4f848c02010-01-02 10:37:43 +00002029/* Process an inbound completion from an rx ring. */
2030static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031 struct rx_ring *rx_ring,
2032 struct ib_mac_iocb_rsp *ib_mac_rsp)
2033{
2034 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
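	/* A vlan_id of 0xffff is used as a "no VLAN tag present" sentinel
	 * by the handlers called below.
	 */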
2038
2039 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2040
2041 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042 /* The data and headers are split into
2043 * separate buffers.
2044 */
2045 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2046 vlan_id);
2047 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048 /* The data fit in a single small buffer.
2049 * Allocate a new skb, copy the data and
2050 * return the buffer to the free pool.
2051 */
2052 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2053 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002054 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057 /* TCP packet in a page chunk that's been checksummed.
2058 * Tack it on to our GRO skb and let it go.
2059 */
2060 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2061 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002062 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063 /* Non-TCP packet in a page chunk. Allocate an
2064 * skb, tack it on frags, and send it up.
2065 */
2066 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2067 length, vlan_id);
2068 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002069 /* Non-TCP/UDP large frames that span multiple buffers
 2070		 * can be processed correctly by the split frame logic.
2071 */
2072 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002074 }
2075
2076 return (unsigned long)length;
2077}
2078
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002079/* Process an outbound completion from an rx ring. */
2080static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2081 struct ob_mac_iocb_rsp *mac_rsp)
2082{
2083 struct tx_ring *tx_ring;
2084 struct tx_ring_desc *tx_ring_desc;
2085
2086 QL_DUMP_OB_MAC_RSP(mac_rsp);
2087 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2088 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2089 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002090 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2091 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002092 dev_kfree_skb(tx_ring_desc->skb);
2093 tx_ring_desc->skb = NULL;
2094
2095 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2096 OB_MAC_IOCB_RSP_S |
2097 OB_MAC_IOCB_RSP_L |
2098 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2099 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002100 netif_warn(qdev, tx_done, qdev->ndev,
2101 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002102 }
2103 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002104 netif_warn(qdev, tx_done, qdev->ndev,
2105 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002106 }
2107 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002108 netif_warn(qdev, tx_done, qdev->ndev,
2109 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002110 }
2111 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002112 netif_warn(qdev, tx_done, qdev->ndev,
2113 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002114 }
2115 }
2116 atomic_inc(&tx_ring->tx_count);
2117}
2118
2119/* Fire up a handler to reset the MPI processor. */
2120void ql_queue_fw_error(struct ql_adapter *qdev)
2121{
Ron Mercer6a473302009-07-02 06:06:12 +00002122 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2124}
2125
2126void ql_queue_asic_error(struct ql_adapter *qdev)
2127{
Ron Mercer6a473302009-07-02 06:06:12 +00002128 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002129 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002130 /* Clear adapter up bit to signal the recovery
2131 * process that it shouldn't kill the reset worker
2132 * thread
2133 */
2134 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002135	/* Set the asic recovery bit to tell the reset process that we
 2136	 * are in fatal error recovery rather than a normal close.
2137 */
2138 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002139 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2140}
2141
2142static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2143 struct ib_ae_iocb_rsp *ib_ae_rsp)
2144{
2145 switch (ib_ae_rsp->event) {
2146 case MGMT_ERR_EVENT:
Joe Perchesae9540f2010-02-09 11:49:52 +00002147 netif_err(qdev, rx_err, qdev->ndev,
2148 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002149 ql_queue_fw_error(qdev);
2150 return;
2151
2152 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002153 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2154 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002155 ql_queue_asic_error(qdev);
2156 return;
2157
2158 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002159 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002160 ql_queue_asic_error(qdev);
2161 break;
2162
2163 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002164 netdev_err(qdev->ndev, "PCI error occurred when reading "
2165 "anonymous buffers from rx_ring %d.\n",
2166 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002167 ql_queue_asic_error(qdev);
2168 break;
2169
2170 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002171 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2172 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002173 ql_queue_asic_error(qdev);
2174 break;
2175 }
2176}
2177
2178static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2179{
2180 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002181 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002182 struct ob_mac_iocb_rsp *net_rsp = NULL;
2183 int count = 0;
2184
Ron Mercer1e213302009-03-09 10:59:21 +00002185 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002186 /* While there are entries in the completion queue. */
2187 while (prod != rx_ring->cnsmr_idx) {
2188
Joe Perchesae9540f2010-02-09 11:49:52 +00002189 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2190 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2191 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002192
2193 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
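		/* Don't let reads of the IOCB contents be speculated ahead
		 * of the producer index read that told us it is valid.
		 */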
2194 rmb();
2195 switch (net_rsp->opcode) {
2196
2197 case OPCODE_OB_MAC_TSO_IOCB:
2198 case OPCODE_OB_MAC_IOCB:
2199 ql_process_mac_tx_intr(qdev, net_rsp);
2200 break;
2201 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002202 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2203 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2204 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002205 }
2206 count++;
2207 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002208 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002209 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002210 if (!net_rsp)
2211 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002212 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002213 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002214 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002215 if (atomic_read(&tx_ring->queue_stopped) &&
2216 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2217 /*
2218 * The queue got stopped because the tx_ring was full.
2219 * Wake it up, because it's now at least 25% empty.
2220 */
Ron Mercer1e213302009-03-09 10:59:21 +00002221 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002222 }
2223
2224 return count;
2225}
2226
2227static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2228{
2229 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002230 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002231 struct ql_net_rsp_iocb *net_rsp;
2232 int count = 0;
2233
2234 /* While there are entries in the completion queue. */
2235 while (prod != rx_ring->cnsmr_idx) {
2236
Joe Perchesae9540f2010-02-09 11:49:52 +00002237 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2238 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2239 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002240
2241 net_rsp = rx_ring->curr_entry;
2242 rmb();
2243 switch (net_rsp->opcode) {
2244 case OPCODE_IB_MAC_IOCB:
2245 ql_process_mac_rx_intr(qdev, rx_ring,
2246 (struct ib_mac_iocb_rsp *)
2247 net_rsp);
2248 break;
2249
2250 case OPCODE_IB_AE_IOCB:
2251 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2252 net_rsp);
2253 break;
2254 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002255 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2257 net_rsp->opcode);
2258 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002259 }
2260 count++;
2261 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002262 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002263 if (count == budget)
2264 break;
2265 }
2266 ql_update_buffer_queues(qdev, rx_ring);
2267 ql_write_cq_idx(rx_ring);
2268 return count;
2269}
2270
2271static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2272{
2273 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2274 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002275 struct rx_ring *trx_ring;
2276 int i, work_done = 0;
2277 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002278
Joe Perchesae9540f2010-02-09 11:49:52 +00002279 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2280 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002281
Ron Mercer39aa8162009-08-27 11:02:11 +00002282 /* Service the TX rings first. They start
2283 * right after the RSS rings. */
2284 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2285 trx_ring = &qdev->rx_ring[i];
2286 /* If this TX completion ring belongs to this vector and
2287 * it's not empty then service it.
2288 */
2289 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2290 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2291 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002292 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2293 "%s: Servicing TX completion ring %d.\n",
2294 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002295 ql_clean_outbound_rx_ring(trx_ring);
2296 }
2297 }
2298
2299 /*
2300 * Now service the RSS ring if it's active.
2301 */
2302 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2303 rx_ring->cnsmr_idx) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002304 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2305 "%s: Servicing RX completion ring %d.\n",
2306 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002307 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2308 }
2309
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002310 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002311 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002312 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2313 }
2314 return work_done;
2315}
2316
Jiri Pirko18c49b92011-07-21 03:24:11 +00002317static void qlge_vlan_mode(struct net_device *ndev, u32 features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002318{
2319 struct ql_adapter *qdev = netdev_priv(ndev);
2320
Jiri Pirko18c49b92011-07-21 03:24:11 +00002321 if (features & NETIF_F_HW_VLAN_RX) {
2322 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00002323 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002324 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002325 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002326 } else {
Jiri Pirko18c49b92011-07-21 03:24:11 +00002327 netif_printk(qdev, ifup, KERN_DEBUG, ndev,
Joe Perchesae9540f2010-02-09 11:49:52 +00002328 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002329 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2330 }
2331}
2332
Jiri Pirko18c49b92011-07-21 03:24:11 +00002333static u32 qlge_fix_features(struct net_device *ndev, u32 features)
2334{
2335 /*
2336 * Since there is no support for separate rx/tx vlan accel
 2337	 * enable/disable, make sure the tx flag is always in the same state as rx.
2338 */
2339 if (features & NETIF_F_HW_VLAN_RX)
2340 features |= NETIF_F_HW_VLAN_TX;
2341 else
2342 features &= ~NETIF_F_HW_VLAN_TX;
2343
2344 return features;
2345}
2346
2347static int qlge_set_features(struct net_device *ndev, u32 features)
2348{
2349 u32 changed = ndev->features ^ features;
2350
2351 if (changed & NETIF_F_HW_VLAN_RX)
2352 qlge_vlan_mode(ndev, features);
2353
2354 return 0;
2355}
2356
2357static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2358{
2359 u32 enable_bit = MAC_ADDR_E;
2360
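	/* Writing the vid with the enable bit set adds it to the chip's
	 * VLAN filter; __qlge_vlan_rx_kill_vid() clears the same entry by
	 * writing it again with the enable bit cleared.
	 */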
2361 if (ql_set_mac_addr_reg
2362 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2363 netif_err(qdev, ifup, qdev->ndev,
2364 "Failed to init vlan address.\n");
2365 }
2366}
2367
Ron Mercer01e6b952009-10-30 12:13:34 +00002368static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002369{
2370 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002371 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002372
Ron Mercercc288f52009-02-23 10:42:14 +00002373 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2374 if (status)
2375 return;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002376
2377 __qlge_vlan_rx_add_vid(qdev, vid);
2378 set_bit(vid, qdev->active_vlans);
2379
Ron Mercercc288f52009-02-23 10:42:14 +00002380 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002381}
2382
Jiri Pirko18c49b92011-07-21 03:24:11 +00002383static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002384{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002385 u32 enable_bit = 0;
2386
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002387 if (ql_set_mac_addr_reg
2388 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002389 netif_err(qdev, ifup, qdev->ndev,
2390 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002391 }
Jiri Pirko18c49b92011-07-21 03:24:11 +00002392}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002393
Jiri Pirko18c49b92011-07-21 03:24:11 +00002394static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2395{
2396 struct ql_adapter *qdev = netdev_priv(ndev);
2397 int status;
2398
2399 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2400 if (status)
2401 return;
2402
2403 __qlge_vlan_rx_kill_vid(qdev, vid);
2404 clear_bit(vid, qdev->active_vlans);
2405
2406 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002407}
2408
Ron Mercerc1b60092010-10-27 04:58:12 +00002409static void qlge_restore_vlan(struct ql_adapter *qdev)
2410{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002411 int status;
2412 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002413
Jiri Pirko18c49b92011-07-21 03:24:11 +00002414 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2415 if (status)
2416 return;
2417
2418 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2419 __qlge_vlan_rx_add_vid(qdev, vid);
2420
2421 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002422}
2423
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002424/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2425static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2426{
2427 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002428 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002429 return IRQ_HANDLED;
2430}
2431
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002432/* This handles a fatal error, MPI activity, and the default
2433 * rx_ring in an MSI-X multiple vector environment.
2434 * In MSI/Legacy environment it also process the rest of
2435 * the rx_rings.
2436 */
2437static irqreturn_t qlge_isr(int irq, void *dev_id)
2438{
2439 struct rx_ring *rx_ring = dev_id;
2440 struct ql_adapter *qdev = rx_ring->qdev;
2441 struct intr_context *intr_context = &qdev->intr_context[0];
2442 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002443 int work_done = 0;
2444
Ron Mercerbb0d2152008-10-20 10:30:26 -07002445 spin_lock(&qdev->hw_lock);
2446 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002447 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2448 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002449 spin_unlock(&qdev->hw_lock);
2450 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002451 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002452 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002453
Ron Mercerbb0d2152008-10-20 10:30:26 -07002454 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002455
2456 /*
2457 * Check for fatal error.
2458 */
2459 if (var & STS_FE) {
2460 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002461 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002462 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002463 netdev_err(qdev->ndev, "Resetting chip. "
2464 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002465 return IRQ_HANDLED;
2466 }
2467
2468 /*
2469 * Check MPI processor activity.
2470 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002471 if ((var & STS_PI) &&
2472 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002473 /*
2474 * We've got an async event or mailbox completion.
2475 * Handle it and clear the source of the interrupt.
2476 */
Joe Perchesae9540f2010-02-09 11:49:52 +00002477 netif_err(qdev, intr, qdev->ndev,
2478 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002479 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002480 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2481 queue_delayed_work_on(smp_processor_id(),
2482 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002483 work_done++;
2484 }
2485
2486 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002487 * Get the bit-mask that shows the active queues for this
2488 * pass. Compare it to the queues that this irq services
2489 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002490 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002491 var = ql_read32(qdev, ISR1);
2492 if (var & intr_context->irq_mask) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002493 netif_info(qdev, intr, qdev->ndev,
2494 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002495 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002496 napi_schedule(&rx_ring->napi);
2497 work_done++;
2498 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002499 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002500 return work_done ? IRQ_HANDLED : IRQ_NONE;
2501}
2502
2503static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2504{
2505
2506 if (skb_is_gso(skb)) {
2507 int err;
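		/* The TSO setup below rewrites the IP and TCP headers, so
		 * make sure we own a private copy of the header area first.
		 */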
2508 if (skb_header_cloned(skb)) {
2509 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2510 if (err)
2511 return err;
2512 }
2513
2514 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2515 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2516 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2517 mac_iocb_ptr->total_hdrs_len =
2518 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2519 mac_iocb_ptr->net_trans_offset =
2520 cpu_to_le16(skb_network_offset(skb) |
2521 skb_transport_offset(skb)
2522 << OB_MAC_TRANSPORT_HDR_SHIFT);
2523 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2524 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
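		/* Seed the TCP checksum with the pseudo-header sum so the
		 * hardware can complete it for each segment it generates.
		 */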
2525 if (likely(skb->protocol == htons(ETH_P_IP))) {
2526 struct iphdr *iph = ip_hdr(skb);
2527 iph->check = 0;
2528 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2529 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2530 iph->daddr, 0,
2531 IPPROTO_TCP,
2532 0);
2533 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2534 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2535 tcp_hdr(skb)->check =
2536 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2537 &ipv6_hdr(skb)->daddr,
2538 0, IPPROTO_TCP, 0);
2539 }
2540 return 1;
2541 }
2542 return 0;
2543}
2544
2545static void ql_hw_csum_setup(struct sk_buff *skb,
2546 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2547{
2548 int len;
2549 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002550 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002551 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2552 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2553 mac_iocb_ptr->net_trans_offset =
2554 cpu_to_le16(skb_network_offset(skb) |
2555 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2556
2557 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
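	/* len is the L4 length (total IP length minus the IP header); it
	 * seeds the pseudo-header checksum written into the packet below,
	 * which the hardware is expected to complete over the payload.
	 */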
2558 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2559 if (likely(iph->protocol == IPPROTO_TCP)) {
2560 check = &(tcp_hdr(skb)->check);
2561 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2562 mac_iocb_ptr->total_hdrs_len =
2563 cpu_to_le16(skb_transport_offset(skb) +
2564 (tcp_hdr(skb)->doff << 2));
2565 } else {
2566 check = &(udp_hdr(skb)->check);
2567 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2568 mac_iocb_ptr->total_hdrs_len =
2569 cpu_to_le16(skb_transport_offset(skb) +
2570 sizeof(struct udphdr));
2571 }
2572 *check = ~csum_tcpudp_magic(iph->saddr,
2573 iph->daddr, len, iph->protocol, 0);
2574}
2575
Stephen Hemminger613573252009-08-31 19:50:58 +00002576static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002577{
2578 struct tx_ring_desc *tx_ring_desc;
2579 struct ob_mac_iocb_req *mac_iocb_ptr;
2580 struct ql_adapter *qdev = netdev_priv(ndev);
2581 int tso;
2582 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002583 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002584
2585 tx_ring = &qdev->tx_ring[tx_ring_idx];
2586
Ron Mercer74c50b42009-03-09 10:59:27 +00002587 if (skb_padto(skb, ETH_ZLEN))
2588 return NETDEV_TX_OK;
2589
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002590 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002591 netif_info(qdev, tx_queued, qdev->ndev,
2592 "%s: shutting down tx queue %d du to lack of resources.\n",
2593 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002594 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002595 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002596 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002597 return NETDEV_TX_BUSY;
2598 }
2599 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2600 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002601 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002602
2603 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2604 mac_iocb_ptr->tid = tx_ring_desc->index;
2605 /* We use the upper 32-bits to store the tx queue for this IO.
2606 * When we get the completion we can use it to establish the context.
2607 */
2608 mac_iocb_ptr->txq_idx = tx_ring_idx;
2609 tx_ring_desc->skb = skb;
2610
2611 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2612
Jesse Grosseab6d182010-10-20 13:56:03 +00002613 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002614 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2615 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002616 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2617 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2618 }
2619 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2620 if (tso < 0) {
2621 dev_kfree_skb_any(skb);
2622 return NETDEV_TX_OK;
2623 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2624 ql_hw_csum_setup(skb,
2625 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2626 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002627 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2628 NETDEV_TX_OK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002629 netif_err(qdev, tx_queued, qdev->ndev,
2630 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002631 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002632 return NETDEV_TX_BUSY;
2633 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002634 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2635 tx_ring->prod_idx++;
2636 if (tx_ring->prod_idx == tx_ring->wq_len)
2637 tx_ring->prod_idx = 0;
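	/* Make sure the IOCB is fully written to memory before the
	 * doorbell write below makes it visible to the hardware.
	 */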
2638 wmb();
2639
2640 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f2010-02-09 11:49:52 +00002641 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2642 "tx queued, slot %d, len %d\n",
2643 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002644
2645 atomic_dec(&tx_ring->tx_count);
2646 return NETDEV_TX_OK;
2647}
2648
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002649
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002650static void ql_free_shadow_space(struct ql_adapter *qdev)
2651{
2652 if (qdev->rx_ring_shadow_reg_area) {
2653 pci_free_consistent(qdev->pdev,
2654 PAGE_SIZE,
2655 qdev->rx_ring_shadow_reg_area,
2656 qdev->rx_ring_shadow_reg_dma);
2657 qdev->rx_ring_shadow_reg_area = NULL;
2658 }
2659 if (qdev->tx_ring_shadow_reg_area) {
2660 pci_free_consistent(qdev->pdev,
2661 PAGE_SIZE,
2662 qdev->tx_ring_shadow_reg_area,
2663 qdev->tx_ring_shadow_reg_dma);
2664 qdev->tx_ring_shadow_reg_area = NULL;
2665 }
2666}
2667
2668static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2669{
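	/* These pages hold the shadow copies of the completion queue
	 * indices (read via ql_read_sh_reg()) so the driver can poll
	 * them from host memory instead of doing MMIO reads.
	 */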
2670 qdev->rx_ring_shadow_reg_area =
2671 pci_alloc_consistent(qdev->pdev,
2672 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2673 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002674 netif_err(qdev, ifup, qdev->ndev,
2675 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002676 return -ENOMEM;
2677 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002678 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002679 qdev->tx_ring_shadow_reg_area =
2680 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2681 &qdev->tx_ring_shadow_reg_dma);
2682 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002683 netif_err(qdev, ifup, qdev->ndev,
2684 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002685 goto err_wqp_sh_area;
2686 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002687 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002688 return 0;
2689
2690err_wqp_sh_area:
2691 pci_free_consistent(qdev->pdev,
2692 PAGE_SIZE,
2693 qdev->rx_ring_shadow_reg_area,
2694 qdev->rx_ring_shadow_reg_dma);
2695 return -ENOMEM;
2696}
2697
2698static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2699{
2700 struct tx_ring_desc *tx_ring_desc;
2701 int i;
2702 struct ob_mac_iocb_req *mac_iocb_ptr;
2703
2704 mac_iocb_ptr = tx_ring->wq_base;
2705 tx_ring_desc = tx_ring->q;
2706 for (i = 0; i < tx_ring->wq_len; i++) {
2707 tx_ring_desc->index = i;
2708 tx_ring_desc->skb = NULL;
2709 tx_ring_desc->queue_entry = mac_iocb_ptr;
2710 mac_iocb_ptr++;
2711 tx_ring_desc++;
2712 }
2713 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2714 atomic_set(&tx_ring->queue_stopped, 0);
2715}
2716
2717static void ql_free_tx_resources(struct ql_adapter *qdev,
2718 struct tx_ring *tx_ring)
2719{
2720 if (tx_ring->wq_base) {
2721 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2722 tx_ring->wq_base, tx_ring->wq_base_dma);
2723 tx_ring->wq_base = NULL;
2724 }
2725 kfree(tx_ring->q);
2726 tx_ring->q = NULL;
2727}
2728
2729static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2730 struct tx_ring *tx_ring)
2731{
2732 tx_ring->wq_base =
2733 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2734 &tx_ring->wq_base_dma);
2735
Joe Perches8e95a202009-12-03 07:58:21 +00002736 if ((tx_ring->wq_base == NULL) ||
2737 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002738 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002739 return -ENOMEM;
2740 }
2741 tx_ring->q =
2742 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2743 if (tx_ring->q == NULL)
2744 goto err;
2745
2746 return 0;
2747err:
2748 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2749 tx_ring->wq_base, tx_ring->wq_base_dma);
2750 return -ENOMEM;
2751}
2752
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002753static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002754{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002755 struct bq_desc *lbq_desc;
2756
Ron Mercer7c734352009-10-19 03:32:19 +00002757 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002758
Ron Mercer7c734352009-10-19 03:32:19 +00002759 curr_idx = rx_ring->lbq_curr_idx;
2760 clean_idx = rx_ring->lbq_clean_idx;
2761 while (curr_idx != clean_idx) {
2762 lbq_desc = &rx_ring->lbq[curr_idx];
2763
2764 if (lbq_desc->p.pg_chunk.last_flag) {
2765 pci_unmap_page(qdev->pdev,
2766 lbq_desc->p.pg_chunk.map,
2767 ql_lbq_block_size(qdev),
2768 PCI_DMA_FROMDEVICE);
2769 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002770 }
Ron Mercer7c734352009-10-19 03:32:19 +00002771
2772 put_page(lbq_desc->p.pg_chunk.page);
2773 lbq_desc->p.pg_chunk.page = NULL;
2774
2775 if (++curr_idx == rx_ring->lbq_len)
2776 curr_idx = 0;
2777
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002778 }
2779}
2780
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002781static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002782{
2783 int i;
2784 struct bq_desc *sbq_desc;
2785
2786 for (i = 0; i < rx_ring->sbq_len; i++) {
2787 sbq_desc = &rx_ring->sbq[i];
2788 if (sbq_desc == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002789 netif_err(qdev, ifup, qdev->ndev,
2790 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002791 return;
2792 }
2793 if (sbq_desc->p.skb) {
2794 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002795 dma_unmap_addr(sbq_desc, mapaddr),
2796 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002797 PCI_DMA_FROMDEVICE);
2798 dev_kfree_skb(sbq_desc->p.skb);
2799 sbq_desc->p.skb = NULL;
2800 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002801 }
2802}
2803
Ron Mercer4545a3f2009-02-23 10:42:17 +00002804/* Free all large and small rx buffers associated
2805 * with the completion queues for this device.
2806 */
2807static void ql_free_rx_buffers(struct ql_adapter *qdev)
2808{
2809 int i;
2810 struct rx_ring *rx_ring;
2811
2812 for (i = 0; i < qdev->rx_ring_count; i++) {
2813 rx_ring = &qdev->rx_ring[i];
2814 if (rx_ring->lbq)
2815 ql_free_lbq_buffers(qdev, rx_ring);
2816 if (rx_ring->sbq)
2817 ql_free_sbq_buffers(qdev, rx_ring);
2818 }
2819}
2820
2821static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2822{
2823 struct rx_ring *rx_ring;
2824 int i;
2825
2826 for (i = 0; i < qdev->rx_ring_count; i++) {
2827 rx_ring = &qdev->rx_ring[i];
2828 if (rx_ring->type != TX_Q)
2829 ql_update_buffer_queues(qdev, rx_ring);
2830 }
2831}
2832
2833static void ql_init_lbq_ring(struct ql_adapter *qdev,
2834 struct rx_ring *rx_ring)
2835{
2836 int i;
2837 struct bq_desc *lbq_desc;
2838 __le64 *bq = rx_ring->lbq_base;
2839
2840 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
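	/* Point each control block at its slot in the DMA-able address
	 * ring; the buffer's bus address is written there when the large
	 * buffer queue is replenished.
	 */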
2841 for (i = 0; i < rx_ring->lbq_len; i++) {
2842 lbq_desc = &rx_ring->lbq[i];
2843 memset(lbq_desc, 0, sizeof(*lbq_desc));
2844 lbq_desc->index = i;
2845 lbq_desc->addr = bq;
2846 bq++;
2847 }
2848}
2849
2850static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002851 struct rx_ring *rx_ring)
2852{
2853 int i;
2854 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002855 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002856
Ron Mercer4545a3f2009-02-23 10:42:17 +00002857 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002858 for (i = 0; i < rx_ring->sbq_len; i++) {
2859 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002860 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002861 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002862 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002863 bq++;
2864 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002865}
2866
2867static void ql_free_rx_resources(struct ql_adapter *qdev,
2868 struct rx_ring *rx_ring)
2869{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002870 /* Free the small buffer queue. */
2871 if (rx_ring->sbq_base) {
2872 pci_free_consistent(qdev->pdev,
2873 rx_ring->sbq_size,
2874 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2875 rx_ring->sbq_base = NULL;
2876 }
2877
2878 /* Free the small buffer queue control blocks. */
2879 kfree(rx_ring->sbq);
2880 rx_ring->sbq = NULL;
2881
2882 /* Free the large buffer queue. */
2883 if (rx_ring->lbq_base) {
2884 pci_free_consistent(qdev->pdev,
2885 rx_ring->lbq_size,
2886 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2887 rx_ring->lbq_base = NULL;
2888 }
2889
2890 /* Free the large buffer queue control blocks. */
2891 kfree(rx_ring->lbq);
2892 rx_ring->lbq = NULL;
2893
2894 /* Free the rx queue. */
2895 if (rx_ring->cq_base) {
2896 pci_free_consistent(qdev->pdev,
2897 rx_ring->cq_size,
2898 rx_ring->cq_base, rx_ring->cq_base_dma);
2899 rx_ring->cq_base = NULL;
2900 }
2901}
2902
 2903/* Allocate queues and buffers for this completion queue based
2904 * on the values in the parameter structure. */
2905static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2906 struct rx_ring *rx_ring)
2907{
2908
2909 /*
2910 * Allocate the completion queue for this rx_ring.
2911 */
2912 rx_ring->cq_base =
2913 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2914 &rx_ring->cq_base_dma);
2915
2916 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002917 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002918 return -ENOMEM;
2919 }
2920
2921 if (rx_ring->sbq_len) {
2922 /*
2923 * Allocate small buffer queue.
2924 */
2925 rx_ring->sbq_base =
2926 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2927 &rx_ring->sbq_base_dma);
2928
2929 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002930 netif_err(qdev, ifup, qdev->ndev,
2931 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002932 goto err_mem;
2933 }
2934
2935 /*
2936 * Allocate small buffer queue control blocks.
2937 */
2938 rx_ring->sbq =
2939 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2940 GFP_KERNEL);
2941 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002942 netif_err(qdev, ifup, qdev->ndev,
2943 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002944 goto err_mem;
2945 }
2946
Ron Mercer4545a3f2009-02-23 10:42:17 +00002947 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002948 }
2949
2950 if (rx_ring->lbq_len) {
2951 /*
2952 * Allocate large buffer queue.
2953 */
2954 rx_ring->lbq_base =
2955 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2956 &rx_ring->lbq_base_dma);
2957
2958 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002959 netif_err(qdev, ifup, qdev->ndev,
2960 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002961 goto err_mem;
2962 }
2963 /*
2964 * Allocate large buffer queue control blocks.
2965 */
2966 rx_ring->lbq =
2967 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2968 GFP_KERNEL);
2969 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002970 netif_err(qdev, ifup, qdev->ndev,
2971 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002972 goto err_mem;
2973 }
2974
Ron Mercer4545a3f2009-02-23 10:42:17 +00002975 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002976 }
2977
2978 return 0;
2979
2980err_mem:
2981 ql_free_rx_resources(qdev, rx_ring);
2982 return -ENOMEM;
2983}
2984
2985static void ql_tx_ring_clean(struct ql_adapter *qdev)
2986{
2987 struct tx_ring *tx_ring;
2988 struct tx_ring_desc *tx_ring_desc;
2989 int i, j;
2990
2991 /*
2992 * Loop through all queues and free
2993 * any resources.
2994 */
2995 for (j = 0; j < qdev->tx_ring_count; j++) {
2996 tx_ring = &qdev->tx_ring[j];
2997 for (i = 0; i < tx_ring->wq_len; i++) {
2998 tx_ring_desc = &tx_ring->q[i];
2999 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003000 netif_err(qdev, ifdown, qdev->ndev,
3001 "Freeing lost SKB %p, from queue %d, index %d.\n",
3002 tx_ring_desc->skb, j,
3003 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003004 ql_unmap_send(qdev, tx_ring_desc,
3005 tx_ring_desc->map_cnt);
3006 dev_kfree_skb(tx_ring_desc->skb);
3007 tx_ring_desc->skb = NULL;
3008 }
3009 }
3010 }
3011}
3012
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003013static void ql_free_mem_resources(struct ql_adapter *qdev)
3014{
3015 int i;
3016
3017 for (i = 0; i < qdev->tx_ring_count; i++)
3018 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3019 for (i = 0; i < qdev->rx_ring_count; i++)
3020 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3021 ql_free_shadow_space(qdev);
3022}
3023
3024static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3025{
3026 int i;
3027
3028 /* Allocate space for our shadow registers and such. */
3029 if (ql_alloc_shadow_space(qdev))
3030 return -ENOMEM;
3031
3032 for (i = 0; i < qdev->rx_ring_count; i++) {
3033 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003034 netif_err(qdev, ifup, qdev->ndev,
3035 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003036 goto err_mem;
3037 }
3038 }
3039 /* Allocate tx queue resources */
3040 for (i = 0; i < qdev->tx_ring_count; i++) {
3041 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003042 netif_err(qdev, ifup, qdev->ndev,
3043 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003044 goto err_mem;
3045 }
3046 }
3047 return 0;
3048
3049err_mem:
3050 ql_free_mem_resources(qdev);
3051 return -ENOMEM;
3052}
3053
3054/* Set up the rx ring control block and pass it to the chip.
3055 * The control block is defined as
3056 * "Completion Queue Initialization Control Block", or cqicb.
3057 */
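/* Illustrative sketch of the shadow area carved out below (added note,
 * derived from the pointer arithmetic in this function): each ring gets
 * RX_RING_SHADOW_SPACE bytes laid out as
 *
 *	+0	completion queue producer index written back by the chip
 *	+8	lbq indirect page table
 *		(MAX_DB_PAGES_PER_BQ(lbq_len) * sizeof(u64))
 *	...	sbq indirect page table, immediately after the lbq one
 */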
3058static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3059{
3060 struct cqicb *cqicb = &rx_ring->cqicb;
3061 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003062 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003063 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003064 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003065 void __iomem *doorbell_area =
3066 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3067 int err = 0;
3068 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003069 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003070 __le64 *base_indirect_ptr;
3071 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003072
3073 /* Set up the shadow registers for this ring. */
3074 rx_ring->prod_idx_sh_reg = shadow_reg;
3075 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003076 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003077 shadow_reg += sizeof(u64);
3078 shadow_reg_dma += sizeof(u64);
3079 rx_ring->lbq_base_indirect = shadow_reg;
3080 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003081 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3082 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003083 rx_ring->sbq_base_indirect = shadow_reg;
3084 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3085
3086 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003087 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003088 rx_ring->cnsmr_idx = 0;
3089 rx_ring->curr_entry = rx_ring->cq_base;
3090
3091 /* PCI doorbell mem area + 0x04 for valid register */
3092 rx_ring->valid_db_reg = doorbell_area + 0x04;
3093
3094 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003095 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003096
3097 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003098 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003099
3100 memset((void *)cqicb, 0, sizeof(struct cqicb));
3101 cqicb->msix_vect = rx_ring->irq;
3102
Ron Mercer459caf52009-01-04 17:08:11 -08003103 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3104 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003105
Ron Mercer97345522009-01-09 11:31:50 +00003106 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003107
Ron Mercer97345522009-01-09 11:31:50 +00003108 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003109
3110 /*
3111 * Set up the control block load flags.
3112 */
3113 cqicb->flags = FLAGS_LC | /* Load queue base address */
3114 FLAGS_LV | /* Load MSI-X vector */
3115 FLAGS_LI; /* Load irq delay values */
3116 if (rx_ring->lbq_len) {
3117 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003118 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003119 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003120 page_entries = 0;
3121 do {
3122 *base_indirect_ptr = cpu_to_le64(tmp);
3123 tmp += DB_PAGE_SIZE;
3124 base_indirect_ptr++;
3125 page_entries++;
3126 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003127 cqicb->lbq_addr =
3128 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003129 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3130 (u16) rx_ring->lbq_buf_size;
3131 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3132 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3133 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003134 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003135 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003136 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003137 rx_ring->lbq_clean_idx = 0;
3138 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003139 }
3140 if (rx_ring->sbq_len) {
3141 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003142 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003143 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003144 page_entries = 0;
3145 do {
3146 *base_indirect_ptr = cpu_to_le64(tmp);
3147 tmp += DB_PAGE_SIZE;
3148 base_indirect_ptr++;
3149 page_entries++;
3150 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003151 cqicb->sbq_addr =
3152 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003153 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003154 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003155 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3156 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003157 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003158 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003159 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003160 rx_ring->sbq_clean_idx = 0;
3161 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003162 }
3163 switch (rx_ring->type) {
3164 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003165 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3166 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3167 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003168 case RX_Q:
3169 /* Inbound completion handling rx_rings run in
3170 * separate NAPI contexts.
3171 */
3172 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3173 64);
3174 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3175 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3176 break;
3177 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00003178 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3179 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003180 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003181 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3182 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003183 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3184 CFG_LCQ, rx_ring->cq_id);
3185 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003186 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003187 return err;
3188 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003189 return err;
3190}
3191
3192static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3193{
3194 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3195 void __iomem *doorbell_area =
3196 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3197 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3198 (tx_ring->wq_id * sizeof(u64));
3199 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3200 (tx_ring->wq_id * sizeof(u64));
3201 int err = 0;
3202
3203 /*
3204 * Assign doorbell registers for this tx_ring.
3205 */
3206 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003207 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003208 tx_ring->prod_idx = 0;
3209 /* TX PCI doorbell mem area + 0x04 */
3210 tx_ring->valid_db_reg = doorbell_area + 0x04;
3211
3212 /*
3213 * Assign shadow registers for this tx_ring.
3214 */
3215 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3216 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3217
3218 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3219 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3220 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3221 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3222 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003223 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003224
Ron Mercer97345522009-01-09 11:31:50 +00003225 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003226
3227 ql_init_tx_ring(qdev, tx_ring);
3228
Ron Mercere3324712009-07-02 06:06:13 +00003229 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230 (u16) tx_ring->wq_id);
3231 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003232 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003233 return err;
3234 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003235 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3236 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003237 return err;
3238}
3239
3240static void ql_disable_msix(struct ql_adapter *qdev)
3241{
3242 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3243 pci_disable_msix(qdev->pdev);
3244 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3245 kfree(qdev->msi_x_entry);
3246 qdev->msi_x_entry = NULL;
3247 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3248 pci_disable_msi(qdev->pdev);
3249 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3250 }
3251}
3252
Ron Mercera4ab6132009-08-27 11:02:10 +00003253/* We start by trying to get the number of vectors
3254 * stored in qdev->intr_count. If we don't get that
3255 * many then we reduce the count and try again.
3256 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003257static void ql_enable_msix(struct ql_adapter *qdev)
3258{
Ron Mercera4ab6132009-08-27 11:02:10 +00003259 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003260
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003261 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003262 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003263 /* Try to alloc space for the msix struct,
3264 * if it fails then go to MSI/legacy.
3265 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003266 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003267 sizeof(struct msix_entry),
3268 GFP_KERNEL);
3269 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003270 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003271 goto msi;
3272 }
3273
Ron Mercera4ab6132009-08-27 11:02:10 +00003274 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003275 qdev->msi_x_entry[i].entry = i;
3276
Ron Mercera4ab6132009-08-27 11:02:10 +00003277 /* Loop to get our vectors. We start with
3278 * what we want and settle for what we get.
3279 */
3280 do {
3281 err = pci_enable_msix(qdev->pdev,
3282 qdev->msi_x_entry, qdev->intr_count);
3283 if (err > 0)
3284 qdev->intr_count = err;
3285 } while (err > 0);
3286
3287 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003288 kfree(qdev->msi_x_entry);
3289 qdev->msi_x_entry = NULL;
Joe Perchesae9540f2010-02-09 11:49:52 +00003290 netif_warn(qdev, ifup, qdev->ndev,
3291 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003292 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003293 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003294 } else if (err == 0) {
3295 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f2010-02-09 11:49:52 +00003296 netif_info(qdev, ifup, qdev->ndev,
3297 "MSI-X Enabled, got %d vectors.\n",
3298 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003299 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003300 }
3301 }
3302msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003303 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003304 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003305 if (!pci_enable_msi(qdev->pdev)) {
3306 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f2010-02-09 11:49:52 +00003307 netif_info(qdev, ifup, qdev->ndev,
3308 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003309 return;
3310 }
3311 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003312 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f2010-02-09 11:49:52 +00003313 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3314 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003315}
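
/* Worked example of the MSI-X negotiation above (illustrative only,
 * using the old pci_enable_msix() contract: 0 on success, a positive
 * return meaning "only this many vectors are available", negative on
 * hard failure):
 *
 *	intr_count = 8			(one per CPU)
 *	pci_enable_msix(..., 8) -> 4	retry with intr_count = 4
 *	pci_enable_msix(..., 4) -> 0	QL_MSIX_ENABLED, 4 vectors
 *
 * A negative return falls back to MSI, and failing that to legacy INTx.
 */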
3316
Ron Mercer39aa8162009-08-27 11:02:11 +00003317/* Each vector services 1 RSS ring and 1 or more
3318 * TX completion rings. This function loops through
3319 * the TX completion rings and assigns the vector that
3320 * will service it. An example would be if there are
3321 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3322 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003323 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003324 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3325 */
3326static void ql_set_tx_vect(struct ql_adapter *qdev)
3327{
3328 int i, j, vect;
3329 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3330
3331 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3332 /* Assign irq vectors to TX rx_rings.*/
3333 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3334 i < qdev->rx_ring_count; i++) {
3335 if (j == tx_rings_per_vector) {
3336 vect++;
3337 j = 0;
3338 }
3339 qdev->rx_ring[i].irq = vect;
3340 j++;
3341 }
3342 } else {
 3343		/* For a single vector all rings have an irq
3344 * of zero.
3345 */
3346 for (i = 0; i < qdev->rx_ring_count; i++)
3347 qdev->rx_ring[i].irq = 0;
3348 }
3349}
3350
3351/* Set the interrupt mask for this vector. Each vector
3352 * will service 1 RSS ring and 1 or more TX completion
3353 * rings. This function sets up a bit mask per vector
3354 * that indicates which rings it services.
3355 */
3356static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3357{
3358 int j, vect = ctx->intr;
3359 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3360
3361 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3362 /* Add the RSS ring serviced by this vector
3363 * to the mask.
3364 */
3365 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3366 /* Add the TX ring(s) serviced by this vector
3367 * to the mask. */
3368 for (j = 0; j < tx_rings_per_vector; j++) {
3369 ctx->irq_mask |=
3370 (1 << qdev->rx_ring[qdev->rss_ring_count +
3371 (vect * tx_rings_per_vector) + j].cq_id);
3372 }
3373 } else {
 3374		/* For a single vector we just shift each queue's
3375 * ID into the mask.
3376 */
3377 for (j = 0; j < qdev->rx_ring_count; j++)
3378 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3379 }
3380}
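
/* Illustrative example for the two helpers above (not from the original
 * sources), assuming 2 MSI-X vectors and 8 TX completion rings, i.e.
 * rss_ring_count = 2, rx_ring_count = 10 and cq_id equal to ring index:
 *
 *	ql_set_tx_vect():  rx_ring[2..5].irq = 0, rx_ring[6..9].irq = 1
 *	ql_set_irq_mask(): vector 0 mask = 1<<0 | 1<<2...1<<5 = 0x03d
 *	                   vector 1 mask = 1<<1 | 1<<6...1<<9 = 0x3c2
 *
 * Each vector therefore covers its RSS ring plus the block of TX
 * completion rings assigned to it in ql_set_tx_vect().
 */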
3381
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003382/*
3383 * Here we build the intr_context structures based on
3384 * our rx_ring count and intr vector count.
3385 * The intr_context structure is used to hook each vector
3386 * to possibly different handlers.
3387 */
3388static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3389{
3390 int i = 0;
3391 struct intr_context *intr_context = &qdev->intr_context[0];
3392
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003393 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3394		/* Each rx_ring has its
3395 * own intr_context since we have separate
3396 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003397 */
3398 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3399 qdev->rx_ring[i].irq = i;
3400 intr_context->intr = i;
3401 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003402 /* Set up this vector's bit-mask that indicates
3403 * which queues it services.
3404 */
3405 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003406 /*
 3407			 * We set up each vector's enable/disable/read bits so
 3408			 * there are no bit/mask calculations in the critical path.
3409 */
3410 intr_context->intr_en_mask =
3411 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3412 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3413 | i;
3414 intr_context->intr_dis_mask =
3415 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3416 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3417 INTR_EN_IHD | i;
3418 intr_context->intr_read_mask =
3419 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3421 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003422 if (i == 0) {
3423 /* The first vector/queue handles
3424 * broadcast/multicast, fatal errors,
3425 * and firmware events. This in addition
3426 * to normal inbound NAPI processing.
3427 */
3428 intr_context->handler = qlge_isr;
3429 sprintf(intr_context->name, "%s-rx-%d",
3430 qdev->ndev->name, i);
3431 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003432 /*
3433 * Inbound queues handle unicast frames only.
3434 */
3435 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003436 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003437 qdev->ndev->name, i);
3438 }
3439 }
3440 } else {
3441 /*
3442 * All rx_rings use the same intr_context since
3443 * there is only one vector.
3444 */
3445 intr_context->intr = 0;
3446 intr_context->qdev = qdev;
3447 /*
 3448		 * We set up each vector's enable/disable/read bits so
 3449		 * there are no bit/mask calculations in the critical path.
3450 */
3451 intr_context->intr_en_mask =
3452 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3453 intr_context->intr_dis_mask =
3454 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3455 INTR_EN_TYPE_DISABLE;
3456 intr_context->intr_read_mask =
3457 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3458 /*
3459 * Single interrupt means one handler for all rings.
3460 */
3461 intr_context->handler = qlge_isr;
3462 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003463 /* Set up this vector's bit-mask that indicates
3464 * which queues it services. In this case there is
3465 * a single vector so it will service all RSS and
3466 * TX completion rings.
3467 */
3468 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003469 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003470 /* Tell the TX completion rings which MSIx vector
3471 * they will be using.
3472 */
3473 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003474}
3475
3476static void ql_free_irq(struct ql_adapter *qdev)
3477{
3478 int i;
3479 struct intr_context *intr_context = &qdev->intr_context[0];
3480
3481 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3482 if (intr_context->hooked) {
3483 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3484 free_irq(qdev->msi_x_entry[i].vector,
3485 &qdev->rx_ring[i]);
Joe Perchesae9540f2010-02-09 11:49:52 +00003486 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3487 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003488 } else {
3489 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f2010-02-09 11:49:52 +00003490 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3491 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003492 }
3493 }
3494 }
3495 ql_disable_msix(qdev);
3496}
3497
3498static int ql_request_irq(struct ql_adapter *qdev)
3499{
3500 int i;
3501 int status = 0;
3502 struct pci_dev *pdev = qdev->pdev;
3503 struct intr_context *intr_context = &qdev->intr_context[0];
3504
3505 ql_resolve_queues_to_irqs(qdev);
3506
3507 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3508 atomic_set(&intr_context->irq_cnt, 0);
3509 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3510 status = request_irq(qdev->msi_x_entry[i].vector,
3511 intr_context->handler,
3512 0,
3513 intr_context->name,
3514 &qdev->rx_ring[i]);
3515 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003516 netif_err(qdev, ifup, qdev->ndev,
3517 "Failed request for MSIX interrupt %d.\n",
3518 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003519 goto err_irq;
3520 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00003521 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3522 "Hooked intr %d, queue type %s, with name %s.\n",
3523 i,
3524 qdev->rx_ring[i].type == DEFAULT_Q ?
3525 "DEFAULT_Q" :
3526 qdev->rx_ring[i].type == TX_Q ?
3527 "TX_Q" :
3528 qdev->rx_ring[i].type == RX_Q ?
3529 "RX_Q" : "",
3530 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003531 }
3532 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00003533 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3534 "trying msi or legacy interrupts.\n");
3535 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3536 "%s: irq = %d.\n", __func__, pdev->irq);
3537 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3538 "%s: context->name = %s.\n", __func__,
3539 intr_context->name);
3540 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3541 "%s: dev_id = 0x%p.\n", __func__,
3542 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003543 status =
3544 request_irq(pdev->irq, qlge_isr,
3545 test_bit(QL_MSI_ENABLED,
3546 &qdev->
3547 flags) ? 0 : IRQF_SHARED,
3548 intr_context->name, &qdev->rx_ring[0]);
3549 if (status)
3550 goto err_irq;
3551
Joe Perchesae9540f2010-02-09 11:49:52 +00003552 netif_err(qdev, ifup, qdev->ndev,
3553 "Hooked intr %d, queue type %s, with name %s.\n",
3554 i,
3555 qdev->rx_ring[0].type == DEFAULT_Q ?
3556 "DEFAULT_Q" :
3557 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3558 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3559 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003560 }
3561 intr_context->hooked = 1;
3562 }
3563 return status;
3564err_irq:
Joe Perchesae9540f2010-02-09 11:49:52 +00003565	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003566 ql_free_irq(qdev);
3567 return status;
3568}
3569
3570static int ql_start_rss(struct ql_adapter *qdev)
3571{
Joe Perches215faf92010-12-21 02:16:10 -08003572 static const u8 init_hash_seed[] = {
3573 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3574 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3575 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3576 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3577 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3578 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003579 struct ricb *ricb = &qdev->ricb;
3580 int status = 0;
3581 int i;
3582 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3583
Ron Mercere3324712009-07-02 06:06:13 +00003584 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003585
Ron Mercerb2014ff2009-08-27 11:02:09 +00003586 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003587 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003588 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3589 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003590
3591 /*
3592 * Fill out the Indirection Table.
3593 */
Ron Mercer541ae282009-10-08 09:54:37 +00003594 for (i = 0; i < 1024; i++)
3595 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003596
Ron Mercer541ae282009-10-08 09:54:37 +00003597 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3598 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003599
Joe Perchesae9540f2010-02-09 11:49:52 +00003600 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003601
Ron Mercere3324712009-07-02 06:06:13 +00003602 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003603 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003604 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003605 return status;
3606 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003607 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3608 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003609 return status;
3610}
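
/* Added note on the indirection table fill above: the index is masked
 * with (rss_ring_count - 1), which spreads the 1024 hash buckets evenly
 * only when the RSS ring count is a power of two.  For example, with
 * four RSS rings:
 *
 *	hash_id[] = 0, 1, 2, 3, 0, 1, 2, 3, ...	(1024 entries)
 */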
3611
Ron Mercera5f59dc2009-07-02 06:06:07 +00003612static int ql_clear_routing_entries(struct ql_adapter *qdev)
3613{
3614 int i, status = 0;
3615
3616 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3617 if (status)
3618 return status;
3619 /* Clear all the entries in the routing table. */
3620 for (i = 0; i < 16; i++) {
3621 status = ql_set_routing_reg(qdev, i, 0, 0);
3622 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003623 netif_err(qdev, ifup, qdev->ndev,
3624 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003625 break;
3626 }
3627 }
3628 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3629 return status;
3630}
3631
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003632/* Initialize the frame-to-queue routing. */
3633static int ql_route_initialize(struct ql_adapter *qdev)
3634{
3635 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003636
3637 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003638 status = ql_clear_routing_entries(qdev);
3639 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003640 return status;
3641
3642 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3643 if (status)
3644 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003645
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003646 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3647 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003649 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003650 "Failed to init routing register "
3651 "for IP CSUM error packets.\n");
3652 goto exit;
3653 }
3654 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3655 RT_IDX_TU_CSUM_ERR, 1);
3656 if (status) {
3657 netif_err(qdev, ifup, qdev->ndev,
3658 "Failed to init routing register "
3659 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003660 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003661 }
3662 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3663 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003664 netif_err(qdev, ifup, qdev->ndev,
3665 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003666 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003667 }
3668 /* If we have more than one inbound queue, then turn on RSS in the
3669 * routing block.
3670 */
3671 if (qdev->rss_ring_count > 1) {
3672 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3673 RT_IDX_RSS_MATCH, 1);
3674 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003675 netif_err(qdev, ifup, qdev->ndev,
3676 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003677 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003678 }
3679 }
3680
3681 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3682 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003683 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003684 netif_err(qdev, ifup, qdev->ndev,
3685 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003686exit:
3687 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003688 return status;
3689}
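
/* Recap of what ql_route_initialize() programs (descriptive note only,
 * see the calls above): dedicated routing entries for IP and TCP/UDP
 * checksum-error frames, an entry for broadcast frames, an RSS-match
 * entry when more than one inbound queue exists, and finally the
 * CAM-hit entry that steers unicast frames for our MAC address.
 */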
3690
Ron Mercer2ee1e272009-03-03 12:10:33 +00003691int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003692{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003693 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003694
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003695	/* Check if the link is up and use that to
3696 * determine if we are setting or clearing
3697 * the MAC address in the CAM.
3698 */
3699 set = ql_read32(qdev, STS);
3700 set &= qdev->port_link_up;
3701 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003702 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003703 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003704 return status;
3705 }
3706
3707 status = ql_route_initialize(qdev);
3708 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003709 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003710
3711 return status;
3712}
3713
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003714static int ql_adapter_initialize(struct ql_adapter *qdev)
3715{
3716 u32 value, mask;
3717 int i;
3718 int status = 0;
3719
3720 /*
3721 * Set up the System register to halt on errors.
3722 */
3723 value = SYS_EFE | SYS_FAE;
3724 mask = value << 16;
3725 ql_write32(qdev, SYS, mask | value);
3726
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003727 /* Set the default queue, and VLAN behavior. */
3728 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3729 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003730 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3731
3732 /* Set the MPI interrupt to enabled. */
3733 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3734
3735 /* Enable the function, set pagesize, enable error checking. */
3736 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003737 FSC_EC | FSC_VM_PAGE_4K;
3738 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003739
3740 /* Set/clear header splitting. */
3741 mask = FSC_VM_PAGESIZE_MASK |
3742 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3743 ql_write32(qdev, FSC, mask | value);
3744
Ron Mercer572c5262010-01-02 10:37:42 +00003745 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003746
Ron Mercera3b71932009-10-08 09:54:38 +00003747 /* Set RX packet routing to use port/pci function on which the
 3748	 * packet arrived, in addition to the usual frame routing.
3749 * This is helpful on bonding where both interfaces can have
3750 * the same MAC address.
3751 */
3752 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003753 /* Reroute all packets to our Interface.
3754 * They may have been routed to MPI firmware
3755 * due to WOL.
3756 */
3757 value = ql_read32(qdev, MGMT_RCV_CFG);
3758 value &= ~MGMT_RCV_CFG_RM;
3759 mask = 0xffff0000;
3760
3761 /* Sticky reg needs clearing due to WOL. */
3762 ql_write32(qdev, MGMT_RCV_CFG, mask);
3763 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3764
3765 /* Default WOL is enable on Mezz cards */
3766 if (qdev->pdev->subsystem_device == 0x0068 ||
3767 qdev->pdev->subsystem_device == 0x0180)
3768 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003769
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003770 /* Start up the rx queues. */
3771 for (i = 0; i < qdev->rx_ring_count; i++) {
3772 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3773 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003774 netif_err(qdev, ifup, qdev->ndev,
3775 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003776 return status;
3777 }
3778 }
3779
3780 /* If there is more than one inbound completion queue
3781 * then download a RICB to configure RSS.
3782 */
3783 if (qdev->rss_ring_count > 1) {
3784 status = ql_start_rss(qdev);
3785 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003786 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003787 return status;
3788 }
3789 }
3790
3791 /* Start up the tx queues. */
3792 for (i = 0; i < qdev->tx_ring_count; i++) {
3793 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3794 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003795 netif_err(qdev, ifup, qdev->ndev,
3796 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003797 return status;
3798 }
3799 }
3800
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003801 /* Initialize the port and set the max framesize. */
3802 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003803 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003804 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003805
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003806 /* Set up the MAC address and frame routing filter. */
3807 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003809 netif_err(qdev, ifup, qdev->ndev,
3810 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003811 return status;
3812 }
3813
3814 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003815 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003816 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3817 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003818 napi_enable(&qdev->rx_ring[i].napi);
3819 }
3820
3821 return status;
3822}
3823
3824/* Issue soft reset to chip. */
3825static int ql_adapter_reset(struct ql_adapter *qdev)
3826{
3827 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003828 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003829 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003830
Ron Mercera5f59dc2009-07-02 06:06:07 +00003831 /* Clear all the entries in the routing table. */
3832 status = ql_clear_routing_entries(qdev);
3833 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003834 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003835 return status;
3836 }
3837
3838 end_jiffies = jiffies +
3839 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003840
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003841	/* If the recovery bit is set, skip the mailbox command and
 3842	 * clear the bit; otherwise this is a normal reset.
3843 */
3844 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3845 /* Stop management traffic. */
3846 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003847
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003848 /* Wait for the NIC and MGMNT FIFOs to empty. */
3849 ql_wait_fifo_empty(qdev);
3850 } else
3851 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003852
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003853 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003854
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003855 do {
3856 value = ql_read32(qdev, RST_FO);
3857 if ((value & RST_FO_FR) == 0)
3858 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003859 cpu_relax();
3860 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003861
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003862 if (value & RST_FO_FR) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003863 netif_err(qdev, ifdown, qdev->ndev,
3864 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003865 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003866 }
3867
Ron Mercer84087f42009-10-08 09:54:41 +00003868 /* Resume management traffic. */
3869 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003870 return status;
3871}
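
/* Summary of the reset sequence above (descriptive note): clear the
 * routing table, quiesce MPI management traffic and drain the FIFOs
 * (skipped when the ASIC-recovery flag is set), assert the function
 * reset bit (RST_FO_FR), poll until the chip clears it or the timeout
 * expires, then resume management traffic.
 */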
3872
3873static void ql_display_dev_info(struct net_device *ndev)
3874{
Joe Perchesb16fed02010-11-15 11:12:28 +00003875 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003876
Joe Perchesae9540f2010-02-09 11:49:52 +00003877 netif_info(qdev, probe, qdev->ndev,
3878 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3879 "XG Roll = %d, XG Rev = %d.\n",
3880 qdev->func,
3881 qdev->port,
3882 qdev->chip_rev_id & 0x0000000f,
3883 qdev->chip_rev_id >> 4 & 0x0000000f,
3884 qdev->chip_rev_id >> 8 & 0x0000000f,
3885 qdev->chip_rev_id >> 12 & 0x0000000f);
3886 netif_info(qdev, probe, qdev->ndev,
3887 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003888}
3889
stephen hemmingerac409212010-10-21 07:50:54 +00003890static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003891{
3892 int status = 0;
3893 u32 wol = MB_WOL_DISABLE;
3894
3895 /* The CAM is still intact after a reset, but if we
3896 * are doing WOL, then we may need to program the
3897 * routing regs. We would also need to issue the mailbox
3898 * commands to instruct the MPI what to do per the ethtool
3899 * settings.
3900 */
3901
3902 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3903 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003904 netif_err(qdev, ifdown, qdev->ndev,
3905 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3906 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003907 return -EINVAL;
3908 }
3909
3910 if (qdev->wol & WAKE_MAGIC) {
3911 status = ql_mb_wol_set_magic(qdev, 1);
3912 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003913 netif_err(qdev, ifdown, qdev->ndev,
3914 "Failed to set magic packet on %s.\n",
3915 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003916 return status;
3917 } else
Joe Perchesae9540f2010-02-09 11:49:52 +00003918 netif_info(qdev, drv, qdev->ndev,
3919 "Enabled magic packet successfully on %s.\n",
3920 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003921
3922 wol |= MB_WOL_MAGIC_PKT;
3923 }
3924
3925 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003926 wol |= MB_WOL_MODE_ON;
3927 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f2010-02-09 11:49:52 +00003928 netif_err(qdev, drv, qdev->ndev,
3929 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003930 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f2010-02-09 11:49:52 +00003931 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003932 }
3933
3934 return status;
3935}
3936
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003937static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003938{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003939
Ron Mercer6497b602009-02-12 16:37:13 -08003940 /* Don't kill the reset worker thread if we
3941 * are in the process of recovery.
3942 */
3943 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3944 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003945 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3946 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003947 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003948 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003949 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003950}
3951
3952static int ql_adapter_down(struct ql_adapter *qdev)
3953{
3954 int i, status = 0;
3955
3956 ql_link_off(qdev);
3957
3958 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003959
Ron Mercer39aa8162009-08-27 11:02:11 +00003960 for (i = 0; i < qdev->rss_ring_count; i++)
3961 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003962
3963 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3964
3965 ql_disable_interrupts(qdev);
3966
3967 ql_tx_ring_clean(qdev);
3968
Ron Mercer6b318cb2009-03-09 10:59:26 +00003969 /* Call netif_napi_del() from common point.
3970 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003971 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003972 netif_napi_del(&qdev->rx_ring[i].napi);
3973
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003974 status = ql_adapter_reset(qdev);
3975 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003976 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3977 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00003978 ql_free_rx_buffers(qdev);
3979
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003980 return status;
3981}
3982
3983static int ql_adapter_up(struct ql_adapter *qdev)
3984{
3985 int err = 0;
3986
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003987 err = ql_adapter_initialize(qdev);
3988 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003989 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003990 goto err_init;
3991 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003992 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003993 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003994 /* If the port is initialized and the
 3995	 * link is up then turn on the carrier.
3996 */
3997 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3998 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003999 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00004000 /* Restore rx mode. */
4001 clear_bit(QL_ALLMULTI, &qdev->flags);
4002 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4003 qlge_set_multicast_list(qdev->ndev);
4004
Ron Mercerc1b60092010-10-27 04:58:12 +00004005 /* Restore vlan setting. */
4006 qlge_restore_vlan(qdev);
4007
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004008 ql_enable_interrupts(qdev);
4009 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004010 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004011
4012 return 0;
4013err_init:
4014 ql_adapter_reset(qdev);
4015 return err;
4016}
4017
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004018static void ql_release_adapter_resources(struct ql_adapter *qdev)
4019{
4020 ql_free_mem_resources(qdev);
4021 ql_free_irq(qdev);
4022}
4023
4024static int ql_get_adapter_resources(struct ql_adapter *qdev)
4025{
4026 int status = 0;
4027
4028 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004029 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004030 return -ENOMEM;
4031 }
4032 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004033 return status;
4034}
4035
4036static int qlge_close(struct net_device *ndev)
4037{
4038 struct ql_adapter *qdev = netdev_priv(ndev);
4039
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004040	/* If we hit the pci_channel_io_perm_failure
 4041	 * condition, then we have already
 4042	 * brought the adapter down.
4043 */
4044 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004045 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004046 clear_bit(QL_EEH_FATAL, &qdev->flags);
4047 return 0;
4048 }
4049
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004050 /*
4051 * Wait for device to recover from a reset.
4052 * (Rarely happens, but possible.)
4053 */
4054 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4055 msleep(1);
4056 ql_adapter_down(qdev);
4057 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004058 return 0;
4059}
4060
4061static int ql_configure_rings(struct ql_adapter *qdev)
4062{
4063 int i;
4064 struct rx_ring *rx_ring;
4065 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004066 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004067 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4068 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4069
4070 qdev->lbq_buf_order = get_order(lbq_buf_len);
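	/* Added example, assuming the usual qlge.h values of 8192/2048
	 * bytes and 4 KiB pages: a jumbo MTU gives lbq_buf_len = 8192 ->
	 * lbq_buf_order = 1 (a two-page chunk), while a 1500-byte MTU
	 * gives 2048 -> order 0 (a single page shared by several buffers).
	 */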
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004071
Ron Mercera4ab6132009-08-27 11:02:10 +00004072 /* In a perfect world we have one RSS ring for each CPU
 4073	 * and each has its own vector. To do that we ask for
4074 * cpu_cnt vectors. ql_enable_msix() will adjust the
4075 * vector count to what we actually get. We then
4076 * allocate an RSS ring for each.
4077 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004078 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004079 qdev->intr_count = cpu_cnt;
4080 ql_enable_msix(qdev);
4081 /* Adjust the RSS ring count to the actual vector count. */
4082 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004083 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004084 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004085
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004086 for (i = 0; i < qdev->tx_ring_count; i++) {
4087 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004088 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004089 tx_ring->qdev = qdev;
4090 tx_ring->wq_id = i;
4091 tx_ring->wq_len = qdev->tx_ring_size;
4092 tx_ring->wq_size =
4093 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4094
4095 /*
 4096		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004097 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004098 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004099 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004100 }
4101
4102 for (i = 0; i < qdev->rx_ring_count; i++) {
4103 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004104 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004105 rx_ring->qdev = qdev;
4106 rx_ring->cq_id = i;
4107 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004108 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004109 /*
4110 * Inbound (RSS) queues.
4111 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004112 rx_ring->cq_len = qdev->rx_ring_size;
4113 rx_ring->cq_size =
4114 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4115 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4116 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004117 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004118 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f2010-02-09 11:49:52 +00004119 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4120 "lbq_buf_size %d, order = %d\n",
4121 rx_ring->lbq_buf_size,
4122 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004123 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4124 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004125 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004126 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004127 rx_ring->type = RX_Q;
4128 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129 /*
4130 * Outbound queue handles outbound completions only.
4131 */
4132 /* outbound cq is same size as tx_ring it services. */
4133 rx_ring->cq_len = qdev->tx_ring_size;
4134 rx_ring->cq_size =
4135 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4136 rx_ring->lbq_len = 0;
4137 rx_ring->lbq_size = 0;
4138 rx_ring->lbq_buf_size = 0;
4139 rx_ring->sbq_len = 0;
4140 rx_ring->sbq_size = 0;
4141 rx_ring->sbq_buf_size = 0;
4142 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004143 }
4144 }
4145 return 0;
4146}
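
/* Worked example of the ring layout built above (illustrative): on a
 * 4-CPU system that gets all four MSI-X vectors,
 *
 *	rss_ring_count = 4, tx_ring_count = 4, rx_ring_count = 8
 *	rx_ring[0..3]: RSS (inbound) queues, cq_id 0..3
 *	rx_ring[4..7]: TX completion queues, cq_id 4..7
 *	tx_ring[i]   : completes on cq_id = rss_ring_count + i
 */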
4147
4148static int qlge_open(struct net_device *ndev)
4149{
4150 int err = 0;
4151 struct ql_adapter *qdev = netdev_priv(ndev);
4152
Ron Mercer74e12432009-11-11 12:54:04 +00004153 err = ql_adapter_reset(qdev);
4154 if (err)
4155 return err;
4156
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004157 err = ql_configure_rings(qdev);
4158 if (err)
4159 return err;
4160
4161 err = ql_get_adapter_resources(qdev);
4162 if (err)
4163 goto error_up;
4164
4165 err = ql_adapter_up(qdev);
4166 if (err)
4167 goto error_up;
4168
4169 return err;
4170
4171error_up:
4172 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004173 return err;
4174}
4175
Ron Mercer7c734352009-10-19 03:32:19 +00004176static int ql_change_rx_buffers(struct ql_adapter *qdev)
4177{
4178 struct rx_ring *rx_ring;
4179 int i, status;
4180 u32 lbq_buf_len;
4181
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004182 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004183 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4184 int i = 3;
4185 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004186 netif_err(qdev, ifup, qdev->ndev,
4187 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004188 ssleep(1);
4189 }
4190
4191 if (!i) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004192 netif_err(qdev, ifup, qdev->ndev,
4193 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004194 return -ETIMEDOUT;
4195 }
4196 }
4197
4198 status = ql_adapter_down(qdev);
4199 if (status)
4200 goto error;
4201
4202 /* Get the new rx buffer size. */
4203 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4204 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4205 qdev->lbq_buf_order = get_order(lbq_buf_len);
4206
4207 for (i = 0; i < qdev->rss_ring_count; i++) {
4208 rx_ring = &qdev->rx_ring[i];
4209 /* Set the new size. */
4210 rx_ring->lbq_buf_size = lbq_buf_len;
4211 }
4212
4213 status = ql_adapter_up(qdev);
4214 if (status)
4215 goto error;
4216
4217 return status;
4218error:
Joe Perchesae9540f2010-02-09 11:49:52 +00004219 netif_alert(qdev, ifup, qdev->ndev,
4220 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004221 set_bit(QL_ADAPTER_UP, &qdev->flags);
4222 dev_close(qdev->ndev);
4223 return status;
4224}
4225
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			&qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev)) {
		return 0;
	}

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

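/* ndo_get_stats handler.  Fold the per-ring RX and TX counters into the
 * netdev stats structure.
 */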
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

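/* ndo_set_multicast_list handler.  Program the routing registers for
 * promiscuous/all-multi transitions and reload the multicast address
 * filter from the netdev list.
 */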
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

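/* ndo_set_mac_address handler.  Validate the new address, keep a local
 * copy, and program it into the CAM for this function.
 */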
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

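/* ndo_tx_timeout handler.  Treat a transmit timeout as an ASIC error so
 * the recovery path can cycle the adapter.
 */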
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}

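/* Worker for ASIC error recovery: cycle the adapter down and back up
 * under rtnl_lock and restore the RX mode, closing the device if the
 * cycle fails.
 */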
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the PCIe function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

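/* Read the function id from the STS register, find the alternate NIC
 * function, and set up the per-port semaphore masks, status bits, mailbox
 * addresses, and nic_ops for this board.
 */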
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

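/* Tear down what ql_init_device() set up: workqueue, register and
 * doorbell mappings, coredump buffer, and PCI regions.
 */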
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

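/* One-time per-device setup: enable and map the PCI device, read board
 * and flash info, and initialize default ring sizes, coalescing
 * parameters, and the delayed-work items.
 */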
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		/* Disable the device again before bailing out. */
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

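/* Periodic (deferrable) timer that polls the STS register so a dead PCI
 * bus is noticed; it stops re-arming once the channel is reported offline.
 */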
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

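/* PCI probe entry point.  Allocate the multi-queue net_device, run the
 * one-time device init, advertise offload features, register the netdev,
 * and start the EEH polling timer.
 */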
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

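/* Power-management suspend, also used by qlge_shutdown().  Bring the
 * adapter down, arm wake-on-LAN, save PCI state, and power the device off.
 */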
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		/* Only bail if the adapter failed to come down. */
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);