/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
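
/* Usage sketch (illustrative only, mirroring the callers later in this
 * file): shared resources are only touched between a successful
 * ql_sem_spinlock() and the matching ql_sem_unlock():
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the FLASH_ADDR/FLASH_DATA registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * The 30 tries * 100us loop above bounds the spin to roughly 3ms.
 */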

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
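
/* Polling budget note (informational): the loop above spends at most
 * UDELAY_COUNT * UDELAY_DELAY microseconds (constants from qlge.h)
 * before giving up with -ETIMEDOUT, and bails out early with -EIO as
 * soon as the caller-supplied err_bit is seen.
 */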

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* don't leak the DMA mapping */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
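
/* Usage sketch (an assumed example modeled on the ring-bringup callers
 * of this helper): downloading a completion-queue init control block
 * would look like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * where CFG_LCQ selects "load completion queue" and q_id picks the
 * target queue.
 */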

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
				    (addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
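
/* Usage sketch for the MAC_ADDR_TYPE_VLAN case (hedged example; the
 * vlan add/kill paths are expected to call it this way): "addr" carries
 * only the enable bit, and the vlan id doubles as the CAM index:
 *
 *	u32 enable_bit = MAC_ADDR_E;	// or 0 to disable this vid
 *
 *	status = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
 *				     MAC_ADDR_TYPE_VLAN, vid);
 */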

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
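
/* Usage sketch (assumption based on the mask cases above): enabling
 * broadcast routing to the default queue would be
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * while calling with mask == 0 and enable == 0 clears the E-bit on the
 * slot given by "index".
 */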

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
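
/* Worked example of the irq_cnt scheme above (informational): after the
 * precharge to 1, the next ql_enable_completion_interrupt() decrements
 * irq_cnt 1 -> 0 and writes INTR_EN; each ql_disable_completion_interrupt()
 * bumps it back up, so the vector is only re-armed once the last
 * outstanding worker has called enable again.
 */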

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
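
/* Checksum note (informational): this is a plain 16-bit wrapping sum.
 * The flash image stores a csum word chosen so that adding every __le16
 * in the region, csum word included, yields 0 mod 2^16; e.g. if the
 * other words sum to 0xFFFE, the stored csum word is 0x0002.
 */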

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
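
/* Usage sketch (hedged; the XGMAC statistics offsets live in qlge.h):
 * reading one 64-bit counter pair is simply
 *
 *	u64 data;
 *
 *	if (ql_read_xgmac_reg64(qdev, reg, &data) == 0)
 *		... consume the low/high halves combined above ...
 *
 * typically done for each stat while holding the port's xg_sem_mask.
 */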

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
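
/* Worked example of the chunking math above (informational): with 4K
 * pages and lbq_buf_order == 1, ql_lbq_block_size() is 8KB, so a 2KB
 * lbq_buf_size slices each master page block into four chunks.  Every
 * chunk except the last takes an extra get_page() reference, and the
 * last one (offset reaching the block size) hands ownership of the DMA
 * mapping to its descriptor via last_flag, which ql_get_curr_lchunk()
 * uses to know when to unmap.
 */
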
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx =%d .\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
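
/* Worked example of the OAL layout above (informational): for an skb
 * with 10 fragments, seg[0] holds skb->data, seg[1]..seg[6] hold frags
 * 0-5, seg[7] points at the OAL, and the OAL carries frags 6-9.  The
 * OAL pointer's length field encodes the (frag_cnt - frag_idx)
 * remaining descriptors with TX_DESC_C set, and the very last
 * descriptor written is terminated with TX_DESC_E.
 */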

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;
	default:
		break;
	}
}

/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
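
/* Layout note for the tag probe above (informational): viewing the
 * frame as u16 words, tags[6] is the outer EtherType at byte offset 12
 * and tags[8] the inner one at offset 16, so a single 802.1Q tag adds
 * VLAN_HLEN (4 bytes) to the header length and a stacked (QinQ) pair
 * adds 8.
 */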
1490
Ron Mercer4f848c02010-01-02 10:37:43 +00001491/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001492static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497{
1498 struct sk_buff *skb;
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001500 struct napi_struct *napi = &rx_ring->napi;
1501
Sritej Velagaae721f32013-04-18 19:49:52 +00001502 /* Frame error, so drop the packet. */
1503 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505 put_page(lbq_desc->p.pg_chunk.page);
1506 return;
1507 }
Ron Mercer63526712010-01-02 10:37:44 +00001508 napi->dev = qdev->ndev;
1509
1510 skb = napi_get_frags(napi);
1511 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001512 netif_err(qdev, drv, qdev->ndev,
1513 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001514 rx_ring->rx_dropped++;
1515 put_page(lbq_desc->p.pg_chunk.page);
1516 return;
1517 }
1518 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001519 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520 lbq_desc->p.pg_chunk.page,
1521 lbq_desc->p.pg_chunk.offset,
1522 length);
Ron Mercer63526712010-01-02 10:37:44 +00001523
1524 skb->len += length;
1525 skb->data_len += length;
1526 skb->truesize += length;
1527 skb_shinfo(skb)->nr_frags++;
1528
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += length;
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001533 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001535 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001536}
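/* Note on the GRO path above: napi_get_frags() hands back an empty skb,
 * so the page chunk is attached as a frag and skb->len/data_len/truesize
 * are grown by hand; napi_gro_frags() then takes ownership of the skb,
 * so the driver must not touch it afterwards.
 */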
1537
1538/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001539static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 struct rx_ring *rx_ring,
1541 struct ib_mac_iocb_rsp *ib_mac_rsp,
1542 u32 length,
1543 u16 vlan_id)
1544{
1545 struct net_device *ndev = qdev->ndev;
1546 struct sk_buff *skb = NULL;
1547 void *addr;
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549 struct napi_struct *napi = &rx_ring->napi;
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001550 size_t hlen = ETH_HLEN;
Ron Mercer4f848c02010-01-02 10:37:43 +00001551
1552 skb = netdev_alloc_skb(ndev, length);
1553 if (!skb) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001554 rx_ring->rx_dropped++;
1555 put_page(lbq_desc->p.pg_chunk.page);
1556 return;
1557 }
1558
1559 addr = lbq_desc->p.pg_chunk.va;
1560 prefetch(addr);
1561
Sritej Velagaae721f32013-04-18 19:49:52 +00001562 /* Frame error, so drop the packet. */
1563 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565 goto err_out;
1566 }
1567
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001568	/* Update the MAC header length */
1569 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
Ron Mercer4f848c02010-01-02 10:37:43 +00001571 /* The max framesize filter on this chip is set higher than
1572 * MTU since FCoE uses 2k frames.
1573 */
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001574 if (skb->len > ndev->mtu + hlen) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001575 netif_err(qdev, drv, qdev->ndev,
 1576			   "Segment too large, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001577 rx_ring->rx_dropped++;
1578 goto err_out;
1579 }
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001580 memcpy(skb_put(skb, hlen), addr, hlen);
Joe Perchesae9540f72010-02-09 11:49:52 +00001581 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001585 lbq_desc->p.pg_chunk.offset + hlen,
1586 length - hlen);
1587 skb->len += length - hlen;
1588 skb->data_len += length - hlen;
1589 skb->truesize += length - hlen;
Ron Mercer4f848c02010-01-02 10:37:43 +00001590
1591 rx_ring->rx_packets++;
1592 rx_ring->rx_bytes += skb->len;
1593 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001594 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001595
Michał Mirosław88230fd2011-04-18 13:31:21 +00001596 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001597 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598 /* TCP frame. */
1599 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001600 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001602 skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001606 struct iphdr *iph =
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001607 (struct iphdr *)((u8 *)addr + hlen);
Ron Mercer4f848c02010-01-02 10:37:43 +00001608 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001609 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001610 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001611 netif_printk(qdev, rx_status, KERN_DEBUG,
1612 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001613 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001614 }
1615 }
1616 }
1617
1618 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001619 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001620 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001621 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622 napi_gro_receive(napi, skb);
1623 else
1624 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001625 return;
1626err_out:
1627 dev_kfree_skb_any(skb);
1628 put_page(lbq_desc->p.pg_chunk.page);
1629}
1630
1631/* Process an inbound completion from an rx ring. */
1632static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633 struct rx_ring *rx_ring,
1634 struct ib_mac_iocb_rsp *ib_mac_rsp,
1635 u32 length,
1636 u16 vlan_id)
1637{
1638 struct net_device *ndev = qdev->ndev;
1639 struct sk_buff *skb = NULL;
1640 struct sk_buff *new_skb = NULL;
1641 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643 skb = sbq_desc->p.skb;
1644 /* Allocate new_skb and copy */
1645 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646 if (new_skb == NULL) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001647 rx_ring->rx_dropped++;
1648 return;
1649 }
1650 skb_reserve(new_skb, NET_IP_ALIGN);
1651 memcpy(skb_put(new_skb, length), skb->data, length);
1652 skb = new_skb;
1653
Sritej Velagaae721f32013-04-18 19:49:52 +00001654 /* Frame error, so drop the packet. */
1655 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1656 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1657 dev_kfree_skb_any(skb);
1658 return;
1659 }
1660
Ron Mercer4f848c02010-01-02 10:37:43 +00001661 /* loopback self test for ethtool */
1662 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1663 ql_check_lb_frame(qdev, skb);
1664 dev_kfree_skb_any(skb);
1665 return;
1666 }
1667
1668 /* The max framesize filter on this chip is set higher than
1669 * MTU since FCoE uses 2k frames.
1670 */
1671 if (skb->len > ndev->mtu + ETH_HLEN) {
1672 dev_kfree_skb_any(skb);
1673 rx_ring->rx_dropped++;
1674 return;
1675 }
1676
1677 prefetch(skb->data);
Ron Mercer4f848c02010-01-02 10:37:43 +00001678 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001679 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1680 "%s Multicast.\n",
1681 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1682 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1683 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1684 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1685 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1686 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001687 }
1688 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001691
1692 rx_ring->rx_packets++;
1693 rx_ring->rx_bytes += skb->len;
1694 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001695 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001696
1697 /* If rx checksum is on, and there are no
1698 * csum or frame errors.
1699 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001700 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001701 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1702 /* TCP frame. */
1703 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001704 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001706 skb->ip_summed = CHECKSUM_UNNECESSARY;
1707 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1708 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1709 /* Unfragmented ipv4 UDP frame. */
1710 struct iphdr *iph = (struct iphdr *) skb->data;
1711 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001712 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001713 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001714 netif_printk(qdev, rx_status, KERN_DEBUG,
1715 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001716 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001717 }
1718 }
1719 }
1720
1721 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001722 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001723 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001724 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1725 napi_gro_receive(&rx_ring->napi, skb);
1726 else
1727 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001728}
1729
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001730static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001731{
1732 void *temp_addr = skb->data;
1733
1734 /* Undo the skb_reserve(skb,32) we did before
1735 * giving to hardware, and realign data on
1736 * a 2-byte boundary.
1737 */
1738 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1739 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1740 skb_copy_to_linear_data(skb, temp_addr,
1741 (unsigned int)len);
1742}
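/* Why realign: the buffer was posted with a QLGE_SB_PAD reserve for the
 * hardware, but the stack wants the IP header 4-byte aligned, which for
 * a 14-byte Ethernet header means data should start at a 2-byte offset
 * (NET_IP_ALIGN). The copy above restores that offset before the skb is
 * handed upstream.
 */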
1743
1744/*
1745 * This function builds an skb for the given inbound
1746 * completion. It will be rewritten for readability in the near
 1747 * future, but for now it works well.
1748 */
1749static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1750 struct rx_ring *rx_ring,
1751 struct ib_mac_iocb_rsp *ib_mac_rsp)
1752{
1753 struct bq_desc *lbq_desc;
1754 struct bq_desc *sbq_desc;
1755 struct sk_buff *skb = NULL;
1756 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001757 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1758 size_t hlen = ETH_HLEN;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001759
1760 /*
1761 * Handle the header buffer if present.
1762 */
1763 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1764 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001767 /*
1768 * Headers fit nicely into a small buffer.
1769 */
1770 sbq_desc = ql_get_curr_sbuf(rx_ring);
1771 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001772 dma_unmap_addr(sbq_desc, mapaddr),
1773 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001774 PCI_DMA_FROMDEVICE);
1775 skb = sbq_desc->p.skb;
1776 ql_realign_skb(skb, hdr_len);
1777 skb_put(skb, hdr_len);
1778 sbq_desc->p.skb = NULL;
1779 }
1780
1781 /*
1782 * Handle the data buffer(s).
1783 */
1784 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001785 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001787 return skb;
1788 }
1789
1790 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 "Headers in small, data of %d bytes in small, combine them.\n",
1794 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001795 /*
1796 * Data is less than small buffer size so it's
1797 * stuffed in a small buffer.
1798 * For this case we append the data
1799 * from the "data" small buffer to the "header" small
1800 * buffer.
1801 */
1802 sbq_desc = ql_get_curr_sbuf(rx_ring);
1803 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001804 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001806 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001807 (sbq_desc, maplen),
1808 PCI_DMA_FROMDEVICE);
1809 memcpy(skb_put(skb, length),
1810 sbq_desc->p.skb->data, length);
1811 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001812 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001813 (sbq_desc,
1814 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001815 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 (sbq_desc,
1817 maplen),
1818 PCI_DMA_FROMDEVICE);
1819 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001820 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 "%d bytes in a single small buffer.\n",
1822 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001823 sbq_desc = ql_get_curr_sbuf(rx_ring);
1824 skb = sbq_desc->p.skb;
1825 ql_realign_skb(skb, length);
1826 skb_put(skb, length);
1827 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001828 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001829 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001830 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001831 maplen),
1832 PCI_DMA_FROMDEVICE);
1833 sbq_desc->p.skb = NULL;
1834 }
1835 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1836 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001837 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1838 "Header in small, %d bytes in large. Chain large to small!\n",
1839 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001840 /*
1841 * The data is in a single large buffer. We
1842 * chain it to the header buffer's skb and let
1843 * it rip.
1844 */
Ron Mercer7c734352009-10-19 03:32:19 +00001845 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001846 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1847 "Chaining page at offset = %d, for %d bytes to skb.\n",
1848 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001849 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1850 lbq_desc->p.pg_chunk.offset,
1851 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001852 skb->len += length;
1853 skb->data_len += length;
1854 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001855 } else {
1856 /*
1857 * The headers and data are in a single large buffer. We
1858 * copy it to a new skb and let it go. This can happen with
1859 * jumbo mtu on a non-TCP/UDP frame.
1860 */
Ron Mercer7c734352009-10-19 03:32:19 +00001861 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001862 skb = netdev_alloc_skb(qdev->ndev, length);
1863 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001864 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1865 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001866 return NULL;
1867 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001868 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001869 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001870 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001871 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001872 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001873 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001874 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1875 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1876 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001877 skb_fill_page_desc(skb, 0,
1878 lbq_desc->p.pg_chunk.page,
1879 lbq_desc->p.pg_chunk.offset,
1880 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001881 skb->len += length;
1882 skb->data_len += length;
1883 skb->truesize += length;
1884 length -= length;
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001885 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1886 lbq_desc->p.pg_chunk.va,
1887 &hlen);
1888 __pskb_pull_tail(skb, hlen);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001889 }
1890 } else {
1891 /*
1892 * The data is in a chain of large buffers
1893 * pointed to by a small buffer. We loop
 1894		 * thru and chain them to our small header
1895 * buffer's skb.
1896 * frags: There are 18 max frags and our small
1897 * buffer will hold 32 of them. The thing is,
1898 * we'll use 3 max for our 9000 byte jumbo
1899 * frames. If the MTU goes up we could
1900 * eventually be in trouble.
1901 */
Ron Mercer7c734352009-10-19 03:32:19 +00001902 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001903 sbq_desc = ql_get_curr_sbuf(rx_ring);
1904 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001905 dma_unmap_addr(sbq_desc, mapaddr),
1906 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001907 PCI_DMA_FROMDEVICE);
1908 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1909 /*
 1910			 * This is a non-TCP/UDP IP frame, so
1911 * the headers aren't split into a small
1912 * buffer. We have to use the small buffer
1913 * that contains our sg list as our skb to
1914 * send upstairs. Copy the sg list here to
1915 * a local buffer and use it to find the
1916 * pages to chain.
1917 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 "%d bytes of headers & data in chain of large.\n",
1920 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001921 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001922 sbq_desc->p.skb = NULL;
1923 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001924 }
Harish Patilafe6e002014-09-18 17:27:24 -04001925 do {
Ron Mercer7c734352009-10-19 03:32:19 +00001926 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1927 size = (length < rx_ring->lbq_buf_size) ? length :
1928 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929
Joe Perchesae9540f72010-02-09 11:49:52 +00001930 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1931 "Adding page %d to skb for %d bytes.\n",
1932 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001933 skb_fill_page_desc(skb, i,
1934 lbq_desc->p.pg_chunk.page,
1935 lbq_desc->p.pg_chunk.offset,
1936 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001937 skb->len += size;
1938 skb->data_len += size;
1939 skb->truesize += size;
1940 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001941 i++;
Harish Patilafe6e002014-09-18 17:27:24 -04001942 } while (length > 0);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001943 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1944 &hlen);
1945 __pskb_pull_tail(skb, hlen);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001946 }
1947 return skb;
1948}
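/* Buffer layouts ql_build_rx_skb() distinguishes, per the IOCB flags:
 *   HV + HS    header split into a small buffer, data handled below
 *   DS         data also fits in a small buffer (append or standalone)
 *   DL + HS    one large buffer chained to the header skb
 *   DL only    headers and data together in a single large buffer
 *   neither    a chain of large buffers described by an sg list that
 *              lives in a small buffer (jumbo frames)
 */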
1949
1950/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001951static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001952 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001953 struct ib_mac_iocb_rsp *ib_mac_rsp,
1954 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001955{
1956 struct net_device *ndev = qdev->ndev;
1957 struct sk_buff *skb = NULL;
1958
1959 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1960
1961 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1962 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001963 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1964 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001965 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001966 return;
1967 }
1968
Sritej Velagaae721f32013-04-18 19:49:52 +00001969 /* Frame error, so drop the packet. */
1970 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1971 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1972 dev_kfree_skb_any(skb);
1973 return;
1974 }
1975
Ron Mercerec33a492009-06-09 05:39:28 +00001976 /* The max framesize filter on this chip is set higher than
1977 * MTU since FCoE uses 2k frames.
1978 */
1979 if (skb->len > ndev->mtu + ETH_HLEN) {
1980 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001981 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001982 return;
1983 }
1984
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001985 /* loopback self test for ethtool */
1986 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1987 ql_check_lb_frame(qdev, skb);
1988 dev_kfree_skb_any(skb);
1989 return;
1990 }
1991
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001992 prefetch(skb->data);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001993 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001994 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1995 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1996 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1997 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1998 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1999 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2000 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00002001 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002002 }
2003 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002004 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2005 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002006 }
Ron Mercerd555f592009-03-09 10:59:19 +00002007
Ron Mercerd555f592009-03-09 10:59:19 +00002008 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002009 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00002010
2011 /* If rx checksum is on, and there are no
2012 * csum or frame errors.
2013 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00002014 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00002015 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2016 /* TCP frame. */
2017 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2019 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002020 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2022 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2023 /* Unfragmented ipv4 UDP frame. */
2024 struct iphdr *iph = (struct iphdr *) skb->data;
2025 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00002026 htons(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002027 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002028 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2029					     "UDP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002030 }
2031 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002032 }
Ron Mercerd555f592009-03-09 10:59:19 +00002033
Ron Mercer885ee392009-11-03 13:49:31 +00002034 rx_ring->rx_packets++;
2035 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002036 skb_record_rx_queue(skb, rx_ring->cq_id);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002037 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002038 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002039 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2040 napi_gro_receive(&rx_ring->napi, skb);
2041 else
2042 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002043}
2044
Ron Mercer4f848c02010-01-02 10:37:43 +00002045/* Process an inbound completion from an rx ring. */
2046static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047 struct rx_ring *rx_ring,
2048 struct ib_mac_iocb_rsp *ib_mac_rsp)
2049{
2050 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002051 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2052 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
Ron Mercer4f848c02010-01-02 10:37:43 +00002053 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2054 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2055
2056 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2057
2058 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2059 /* The data and headers are split into
2060 * separate buffers.
2061 */
2062 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2063 vlan_id);
2064 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2065 /* The data fit in a single small buffer.
2066 * Allocate a new skb, copy the data and
2067 * return the buffer to the free pool.
2068 */
2069 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2070 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002071 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2072 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2073 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2074 /* TCP packet in a page chunk that's been checksummed.
2075 * Tack it on to our GRO skb and let it go.
2076 */
2077 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2078 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002079 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2080 /* Non-TCP packet in a page chunk. Allocate an
2081 * skb, tack it on frags, and send it up.
2082 */
2083 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2084 length, vlan_id);
2085 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002086 /* Non-TCP/UDP large frames that span multiple buffers
2087 * can be processed corrrectly by the split frame logic.
2088 */
2089 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2090 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002091 }
2092
2093 return (unsigned long)length;
2094}
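/* Dispatch summary for the completion handler above:
 *   RSP_HV                   split header/data  -> ql_process_mac_split_rx_intr()
 *   RSP_DS                   small-buffer frame -> ql_process_mac_rx_skb()
 *   RSP_DL + TCP + csum OK   page chunk via GRO -> ql_process_mac_rx_gro_page()
 *   RSP_DL                   other page chunks  -> ql_process_mac_rx_page()
 *   otherwise                multi-buffer frame -> ql_process_mac_split_rx_intr()
 */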
2095
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002096/* Process an outbound completion from an rx ring. */
2097static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2098 struct ob_mac_iocb_rsp *mac_rsp)
2099{
2100 struct tx_ring *tx_ring;
2101 struct tx_ring_desc *tx_ring_desc;
2102
2103 QL_DUMP_OB_MAC_RSP(mac_rsp);
2104 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2106 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002107 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2108 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002109 dev_kfree_skb(tx_ring_desc->skb);
2110 tx_ring_desc->skb = NULL;
2111
2112 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2113 OB_MAC_IOCB_RSP_S |
2114 OB_MAC_IOCB_RSP_L |
2115 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2116 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002117 netif_warn(qdev, tx_done, qdev->ndev,
2118 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002119 }
2120 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002121 netif_warn(qdev, tx_done, qdev->ndev,
2122 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 }
2124 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002125 netif_warn(qdev, tx_done, qdev->ndev,
2126 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002127 }
2128 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002129 netif_warn(qdev, tx_done, qdev->ndev,
2130 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002131 }
2132 }
2133 atomic_inc(&tx_ring->tx_count);
2134}
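/* TX completion context recovery: qlge_send() stores the queue index in
 * txq_idx and the descriptor slot in tid, so the handler above can go
 * straight to the right tx_ring_desc, unmap its DMA segments and free
 * the skb without scanning the ring.
 */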
2135
2136/* Fire up a handler to reset the MPI processor. */
2137void ql_queue_fw_error(struct ql_adapter *qdev)
2138{
Ron Mercer6a473302009-07-02 06:06:12 +00002139 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002140 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2141}
2142
2143void ql_queue_asic_error(struct ql_adapter *qdev)
2144{
Ron Mercer6a473302009-07-02 06:06:12 +00002145 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002146 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002147 /* Clear adapter up bit to signal the recovery
2148 * process that it shouldn't kill the reset worker
2149 * thread
2150 */
2151 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002152 /* Set asic recovery bit to indicate reset process that we are
2153 * in fatal error recovery process rather than normal close
2154 */
2155 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002156 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2157}
2158
2159static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2160 struct ib_ae_iocb_rsp *ib_ae_rsp)
2161{
2162 switch (ib_ae_rsp->event) {
2163 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002164 netif_err(qdev, rx_err, qdev->ndev,
2165 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 ql_queue_fw_error(qdev);
2167 return;
2168
2169 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002170 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2171 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002172 ql_queue_asic_error(qdev);
2173 return;
2174
2175 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002176 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002177 ql_queue_asic_error(qdev);
2178 break;
2179
2180 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002181 netdev_err(qdev->ndev, "PCI error occurred when reading "
2182 "anonymous buffers from rx_ring %d.\n",
2183 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002184 ql_queue_asic_error(qdev);
2185 break;
2186
2187 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002188 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2189 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002190 ql_queue_asic_error(qdev);
2191 break;
2192 }
2193}
2194
2195static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2196{
2197 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002198 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199 struct ob_mac_iocb_rsp *net_rsp = NULL;
2200 int count = 0;
2201
Ron Mercer1e213302009-03-09 10:59:21 +00002202 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002203 /* While there are entries in the completion queue. */
2204 while (prod != rx_ring->cnsmr_idx) {
2205
Joe Perchesae9540f72010-02-09 11:49:52 +00002206 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2207			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2208 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002209
2210 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2211 rmb();
2212 switch (net_rsp->opcode) {
2213
2214 case OPCODE_OB_MAC_TSO_IOCB:
2215 case OPCODE_OB_MAC_IOCB:
2216 ql_process_mac_tx_intr(qdev, net_rsp);
2217 break;
2218 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002219 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2220 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2221 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002222 }
2223 count++;
2224 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002225 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002226 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002227 if (!net_rsp)
2228 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002229 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002230 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002231 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Jitendra Kalsariad0de7302012-07-10 14:57:32 +00002232 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002233 /*
2234 * The queue got stopped because the tx_ring was full.
2235 * Wake it up, because it's now at least 25% empty.
2236 */
Ron Mercer1e213302009-03-09 10:59:21 +00002237 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002238 }
2239
2240 return count;
2241}
2242
2243static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2244{
2245 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002246 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002247 struct ql_net_rsp_iocb *net_rsp;
2248 int count = 0;
2249
2250 /* While there are entries in the completion queue. */
2251 while (prod != rx_ring->cnsmr_idx) {
2252
Joe Perchesae9540f72010-02-09 11:49:52 +00002253 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
 2254			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2255 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002256
2257 net_rsp = rx_ring->curr_entry;
2258 rmb();
2259 switch (net_rsp->opcode) {
2260 case OPCODE_IB_MAC_IOCB:
2261 ql_process_mac_rx_intr(qdev, rx_ring,
2262 (struct ib_mac_iocb_rsp *)
2263 net_rsp);
2264 break;
2265
2266 case OPCODE_IB_AE_IOCB:
2267 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2268 net_rsp);
2269 break;
2270 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002271 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2272 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2273 net_rsp->opcode);
2274 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002275 }
2276 count++;
2277 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002278 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002279 if (count == budget)
2280 break;
2281 }
2282 ql_update_buffer_queues(qdev, rx_ring);
2283 ql_write_cq_idx(rx_ring);
2284 return count;
2285}
2286
2287static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2288{
2289 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2290 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002291 struct rx_ring *trx_ring;
2292 int i, work_done = 0;
2293 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002294
Joe Perchesae9540f72010-02-09 11:49:52 +00002295 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2296 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002297
Ron Mercer39aa8162009-08-27 11:02:11 +00002298 /* Service the TX rings first. They start
2299 * right after the RSS rings. */
2300 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2301 trx_ring = &qdev->rx_ring[i];
2302 /* If this TX completion ring belongs to this vector and
2303 * it's not empty then service it.
2304 */
2305 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2306 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2307 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002308 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309 "%s: Servicing TX completion ring %d.\n",
2310 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002311 ql_clean_outbound_rx_ring(trx_ring);
2312 }
2313 }
2314
2315 /*
2316 * Now service the RSS ring if it's active.
2317 */
2318 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2319 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002320 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2321 "%s: Servicing RX completion ring %d.\n",
2322 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002323 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2324 }
2325
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002326 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002327 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2329 }
2330 return work_done;
2331}
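/* Standard NAPI contract: returning work_done == budget keeps this ring
 * on the poll list; only when work_done < budget does the handler call
 * napi_complete() and re-enable the completion interrupt, as above.
 */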
2332
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002333static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002334{
2335 struct ql_adapter *qdev = netdev_priv(ndev);
2336
Patrick McHardyf6469682013-04-19 02:04:27 +00002337 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002338 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002339 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002340 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002341 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2342 }
2343}
2344
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002345/**
2346 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 2347 * to enable or disable hardware VLAN acceleration based on the requested features
2348 */
2349static int qlge_update_hw_vlan_features(struct net_device *ndev,
2350 netdev_features_t features)
2351{
2352 struct ql_adapter *qdev = netdev_priv(ndev);
2353 int status = 0;
Marcelo Leitner61132bf2015-01-30 09:56:01 -02002354 bool need_restart = netif_running(ndev);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002355
Marcelo Leitner61132bf2015-01-30 09:56:01 -02002356 if (need_restart) {
2357 status = ql_adapter_down(qdev);
2358 if (status) {
2359 netif_err(qdev, link, qdev->ndev,
2360 "Failed to bring down the adapter\n");
2361 return status;
2362 }
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002363 }
2364
 2365	/* Apply the requested feature change */
2366 ndev->features = features;
2367
Marcelo Leitner61132bf2015-01-30 09:56:01 -02002368 if (need_restart) {
2369 status = ql_adapter_up(qdev);
2370 if (status) {
2371 netif_err(qdev, link, qdev->ndev,
2372 "Failed to bring up the adapter\n");
2373 return status;
2374 }
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002375 }
Marcelo Leitner61132bf2015-01-30 09:56:01 -02002376
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002377 return status;
2378}
2379
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002380static netdev_features_t qlge_fix_features(struct net_device *ndev,
2381 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002382{
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002383 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002384
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002385 /* Update the behavior of vlan accel in the adapter */
2386 err = qlge_update_hw_vlan_features(ndev, features);
2387 if (err)
2388 return err;
2389
Jiri Pirko18c49b92011-07-21 03:24:11 +00002390 return features;
2391}
2392
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002393static int qlge_set_features(struct net_device *ndev,
2394 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002395{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002396 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002397
Patrick McHardyf6469682013-04-19 02:04:27 +00002398 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002399 qlge_vlan_mode(ndev, features);
2400
2401 return 0;
2402}
2403
Jiri Pirko8e586132011-12-08 19:52:37 -05002404static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002405{
2406 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002407 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002408
Jiri Pirko8e586132011-12-08 19:52:37 -05002409 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2410 MAC_ADDR_TYPE_VLAN, vid);
2411 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002412 netif_err(qdev, ifup, qdev->ndev,
2413 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002414 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002415}
2416
Patrick McHardy80d5c362013-04-19 02:04:28 +00002417static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002418{
2419 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002420 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002421 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002422
Ron Mercercc288f52009-02-23 10:42:14 +00002423 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2424 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002425 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002426
Jiri Pirko8e586132011-12-08 19:52:37 -05002427 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002428 set_bit(vid, qdev->active_vlans);
2429
Ron Mercercc288f52009-02-23 10:42:14 +00002430 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002431
2432 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002433}
2434
Jiri Pirko8e586132011-12-08 19:52:37 -05002435static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002436{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002437 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002438 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002439
Jiri Pirko8e586132011-12-08 19:52:37 -05002440 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2441 MAC_ADDR_TYPE_VLAN, vid);
2442 if (err)
Joe Perchesae9540f72010-02-09 11:49:52 +00002443 netif_err(qdev, ifup, qdev->ndev,
2444 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002445 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002446}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002447
Patrick McHardy80d5c362013-04-19 02:04:28 +00002448static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002449{
2450 struct ql_adapter *qdev = netdev_priv(ndev);
2451 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002452 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002453
2454 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2455 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002456 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002457
Jiri Pirko8e586132011-12-08 19:52:37 -05002458 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002459 clear_bit(vid, qdev->active_vlans);
2460
2461 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002462
2463 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002464}
2465
Ron Mercerc1b60092010-10-27 04:58:12 +00002466static void qlge_restore_vlan(struct ql_adapter *qdev)
2467{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002468 int status;
2469 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002470
Jiri Pirko18c49b92011-07-21 03:24:11 +00002471 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2472 if (status)
2473 return;
2474
2475 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2476 __qlge_vlan_rx_add_vid(qdev, vid);
2477
2478 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002479}
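/* qlge_restore_vlan() replays every vid recorded in the active_vlans
 * bitmap into the hardware filter, presumably so VLAN membership
 * survives an adapter reset that wipes the MAC address registers.
 */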
2480
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002481/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2482static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2483{
2484 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002485 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002486 return IRQ_HANDLED;
2487}
2488
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002489/* This handles a fatal error, MPI activity, and the default
2490 * rx_ring in an MSI-X multiple vector environment.
 2491 * In an MSI/Legacy environment it also processes the rest of
2492 * the rx_rings.
2493 */
2494static irqreturn_t qlge_isr(int irq, void *dev_id)
2495{
2496 struct rx_ring *rx_ring = dev_id;
2497 struct ql_adapter *qdev = rx_ring->qdev;
2498 struct intr_context *intr_context = &qdev->intr_context[0];
2499 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002500 int work_done = 0;
2501
Ron Mercerbb0d2152008-10-20 10:30:26 -07002502 spin_lock(&qdev->hw_lock);
2503 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002504 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2505 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002506 spin_unlock(&qdev->hw_lock);
2507 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002508 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002509 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002510
Ron Mercerbb0d2152008-10-20 10:30:26 -07002511 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002512
2513 /*
2514 * Check for fatal error.
2515 */
2516 if (var & STS_FE) {
2517 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002518 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002519 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002520 netdev_err(qdev->ndev, "Resetting chip. "
2521 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002522 return IRQ_HANDLED;
2523 }
2524
2525 /*
2526 * Check MPI processor activity.
2527 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002528 if ((var & STS_PI) &&
2529 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002530 /*
2531 * We've got an async event or mailbox completion.
2532 * Handle it and clear the source of the interrupt.
2533 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002534 netif_err(qdev, intr, qdev->ndev,
2535 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002536 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002537 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2538 queue_delayed_work_on(smp_processor_id(),
2539 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002540 work_done++;
2541 }
2542
2543 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002544 * Get the bit-mask that shows the active queues for this
2545 * pass. Compare it to the queues that this irq services
2546 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002547 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002548 var = ql_read32(qdev, ISR1);
2549 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002550 netif_info(qdev, intr, qdev->ndev,
2551 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002552 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002553 napi_schedule(&rx_ring->napi);
2554 work_done++;
2555 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002556 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557 return work_done ? IRQ_HANDLED : IRQ_NONE;
2558}
2559
2560static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2561{
2562
2563 if (skb_is_gso(skb)) {
2564 int err;
Vlad Yasevich1ee1cfe2014-08-25 10:34:55 -04002565 __be16 l3_proto = vlan_get_protocol(skb);
françois romieubb9689e2014-03-29 12:26:27 +01002566
2567 err = skb_cow_head(skb, 0);
2568 if (err < 0)
2569 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002570
2571 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2572 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2573 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2574 mac_iocb_ptr->total_hdrs_len =
2575 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2576 mac_iocb_ptr->net_trans_offset =
2577 cpu_to_le16(skb_network_offset(skb) |
2578 skb_transport_offset(skb)
2579 << OB_MAC_TRANSPORT_HDR_SHIFT);
2580 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2581 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
Vlad Yasevich1ee1cfe2014-08-25 10:34:55 -04002582 if (likely(l3_proto == htons(ETH_P_IP))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002583 struct iphdr *iph = ip_hdr(skb);
2584 iph->check = 0;
2585 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2586 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2587 iph->daddr, 0,
2588 IPPROTO_TCP,
2589 0);
Vlad Yasevich1ee1cfe2014-08-25 10:34:55 -04002590 } else if (l3_proto == htons(ETH_P_IPV6)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002591 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2592 tcp_hdr(skb)->check =
2593 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2594 &ipv6_hdr(skb)->daddr,
2595 0, IPPROTO_TCP, 0);
2596 }
2597 return 1;
2598 }
2599 return 0;
2600}
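/* TSO checksum seeding: for LSO the hardware recomputes the TCP checksum
 * for every generated segment, so ql_tso() seeds tcp_hdr->check with just
 * the pseudo-header sum (note the zero length passed to
 * csum_tcpudp_magic()/csum_ipv6_magic()) and leaves payload folding to
 * the chip.
 */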
2601
2602static void ql_hw_csum_setup(struct sk_buff *skb,
2603 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2604{
2605 int len;
2606 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002607 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002608 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2609 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2610 mac_iocb_ptr->net_trans_offset =
2611 cpu_to_le16(skb_network_offset(skb) |
2612 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2613
2614 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2615 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2616 if (likely(iph->protocol == IPPROTO_TCP)) {
2617 check = &(tcp_hdr(skb)->check);
2618 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2619 mac_iocb_ptr->total_hdrs_len =
2620 cpu_to_le16(skb_transport_offset(skb) +
2621 (tcp_hdr(skb)->doff << 2));
2622 } else {
2623 check = &(udp_hdr(skb)->check);
2624 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2625 mac_iocb_ptr->total_hdrs_len =
2626 cpu_to_le16(skb_transport_offset(skb) +
2627 sizeof(struct udphdr));
2628 }
2629 *check = ~csum_tcpudp_magic(iph->saddr,
2630 iph->daddr, len, iph->protocol, 0);
2631}
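/* The non-TSO offload above differs in one way: the pseudo-header sum is
 * seeded with the real L4 length (IP tot_len minus the IP header), since
 * the frame goes out as a single segment.
 */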
2632
Stephen Hemminger613573252009-08-31 19:50:58 +00002633static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002634{
2635 struct tx_ring_desc *tx_ring_desc;
2636 struct ob_mac_iocb_req *mac_iocb_ptr;
2637 struct ql_adapter *qdev = netdev_priv(ndev);
2638 int tso;
2639 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002640 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002641
2642 tx_ring = &qdev->tx_ring[tx_ring_idx];
2643
Ron Mercer74c50b42009-03-09 10:59:27 +00002644 if (skb_padto(skb, ETH_ZLEN))
2645 return NETDEV_TX_OK;
2646
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002647 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002648 netif_info(qdev, tx_queued, qdev->ndev,
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002649 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00002650 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002651 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercer885ee392009-11-03 13:49:31 +00002652 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002653 return NETDEV_TX_BUSY;
2654 }
2655 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2656 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002657 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002658
2659 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2660 mac_iocb_ptr->tid = tx_ring_desc->index;
2661 /* We use the upper 32-bits to store the tx queue for this IO.
2662 * When we get the completion we can use it to establish the context.
2663 */
2664 mac_iocb_ptr->txq_idx = tx_ring_idx;
2665 tx_ring_desc->skb = skb;
2666
2667 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2668
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002669 if (skb_vlan_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002671 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002672 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002673 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002674 }
2675 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2676 if (tso < 0) {
2677 dev_kfree_skb_any(skb);
2678 return NETDEV_TX_OK;
2679 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2680 ql_hw_csum_setup(skb,
2681 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2682 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002683 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2684 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002685 netif_err(qdev, tx_queued, qdev->ndev,
2686 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002687 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002688 return NETDEV_TX_BUSY;
2689 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002690 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2691 tx_ring->prod_idx++;
2692 if (tx_ring->prod_idx == tx_ring->wq_len)
2693 tx_ring->prod_idx = 0;
2694 wmb();
2695
2696 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002697 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2698 "tx queued, slot %d, len %d\n",
2699 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002700
2701 atomic_dec(&tx_ring->tx_count);
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002702
2703 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2704 netif_stop_subqueue(ndev, tx_ring->wq_id);
2705 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2706 /*
2707 * The queue got stopped because the tx_ring was full.
2708 * Wake it up, because it's now at least 25% empty.
2709 */
2710 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2711 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002712 return NETDEV_TX_OK;
2713}
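/* Doorbell ordering in qlge_send(): the wmb() makes the IOCB writes
 * visible before the producer index is written to the doorbell register.
 * Stopping the subqueue when fewer than two descriptors remain and waking
 * it once the ring is at least 25% free gives simple flow-control
 * hysteresis, mirrored in ql_clean_outbound_rx_ring().
 */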
2714
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002715
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002716static void ql_free_shadow_space(struct ql_adapter *qdev)
2717{
2718 if (qdev->rx_ring_shadow_reg_area) {
2719 pci_free_consistent(qdev->pdev,
2720 PAGE_SIZE,
2721 qdev->rx_ring_shadow_reg_area,
2722 qdev->rx_ring_shadow_reg_dma);
2723 qdev->rx_ring_shadow_reg_area = NULL;
2724 }
2725 if (qdev->tx_ring_shadow_reg_area) {
2726 pci_free_consistent(qdev->pdev,
2727 PAGE_SIZE,
2728 qdev->tx_ring_shadow_reg_area,
2729 qdev->tx_ring_shadow_reg_dma);
2730 qdev->tx_ring_shadow_reg_area = NULL;
2731 }
2732}
2733
2734static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2735{
2736 qdev->rx_ring_shadow_reg_area =
Joe Perches440c7342014-08-08 14:24:34 -07002737 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2738 &qdev->rx_ring_shadow_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002739 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002740 netif_err(qdev, ifup, qdev->ndev,
2741 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002742 return -ENOMEM;
2743 }
Joe Perches440c7342014-08-08 14:24:34 -07002744
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002745 qdev->tx_ring_shadow_reg_area =
Joe Perches440c7342014-08-08 14:24:34 -07002746 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2747 &qdev->tx_ring_shadow_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002748 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002749 netif_err(qdev, ifup, qdev->ndev,
2750 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002751 goto err_wqp_sh_area;
2752 }
2753 return 0;
2754
2755err_wqp_sh_area:
2756 pci_free_consistent(qdev->pdev,
2757 PAGE_SIZE,
2758 qdev->rx_ring_shadow_reg_area,
2759 qdev->rx_ring_shadow_reg_dma);
2760 return -ENOMEM;
2761}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}
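
/* The (wq_base_dma & WQ_ADDR_ALIGN) test above rejects a work-queue base
 * the chip could not address: WQ_ADDR_ALIGN acts as an alignment mask, so
 * a nonzero result means low address bits are set and the buffer is
 * misaligned.  A generic sketch of the idiom (the mask value below is
 * illustrative, not the driver's):
 *
 *	#define ALIGN_MASK 0x3f			// 64-byte alignment
 *	if (dma_addr & ALIGN_MASK)		// low bits set => misaligned
 *		return -ENOMEM;
 */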

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
	if (rx_ring->pg_chunk.page) {
		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
		put_page(rx_ring->pg_chunk.page);
		rx_ring->pg_chunk.page = NULL;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}
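
/* Both init helpers above walk two parallel arrays: the bq_desc control
 * blocks (host-side bookkeeping) and a __le64 array (the buffer queue the
 * chip DMA-reads).  Each descriptor keeps a pointer to its own
 * little-endian slot, so the refill path can publish a new buffer address
 * with a single store.  Schematically (illustrative only):
 *
 *	desc[i].index = i;
 *	desc[i].addr  = &bq_base[i];		// chip-visible __le64 slot
 *	// ... later, on refill:
 *	*desc[i].addr = cpu_to_le64(buf_dma);
 */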

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->sbq == NULL)
			goto err_mem;

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->lbq == NULL)
			goto err_mem;

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
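
/* Worked example for the indirection tables above (illustrative numbers):
 * with a 4096-byte DB_PAGE_SIZE, a buffer queue of 1024 eight-byte entries
 * is 8192 bytes, i.e. two doorbell pages, so MAX_DB_PAGES_PER_BQ(1024) == 2
 * and the do/while loop publishes two page addresses:
 *
 *	base_indirect[0] = cpu_to_le64(bq_dma);		// first page
 *	base_indirect[1] = cpu_to_le64(bq_dma + 4096);	// second page
 *
 * The CQICB is then handed the DMA address of this per-ring page list
 * (lbq_addr/sbq_addr), not the buffer queue itself; the chip walks the
 * list to locate the queue pages.
 */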

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
					    1, qdev->intr_count);
		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qlge_irq_type = MSI_IRQ;
		} else {
			qdev->intr_count = err;
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}
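
/* Interrupt setup above is a three-step fallback ladder: MSI-X with
 * anywhere from intr_count down to 1 vector (pci_enable_msix_range()
 * returns the number actually granted, which becomes the new intr_count),
 * then plain MSI, then legacy INTx.  A condensed sketch of the same
 * pattern (variable names are illustrative):
 *
 *	ret = pci_enable_msix_range(pdev, entries, 1, want);
 *	if (ret > 0)
 *		got = ret;		// 1 <= got <= want, MSI-X
 *	else if (!pci_enable_msi(pdev))
 *		got = 1;		// MSI
 *	else
 *		got = 1;		// legacy INTx, possibly shared
 */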

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
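
/* Worked example of the assignment above, using the numbers from the
 * comment: intr_count = 2 and tx_ring_count = 8 gives
 * tx_rings_per_vector = 4.  The outbound completion rings start at index
 * rss_ring_count (= 2), so the loop produces:
 *
 *	rx_ring[2..5].irq = 0;	// TX completions 0-3 -> vector 0
 *	rx_ring[6..9].irq = 1;	// TX completions 4-7 -> vector 1
 *
 * while rx_ring[0].irq and rx_ring[1].irq are set to their own vectors
 * in ql_resolve_queues_to_irqs() below.
 */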

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
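
/* Continuing the 2-vector/8-TX-ring example: for vect = 1 the mask gets
 * the bit of RSS ring 1 plus the bits of the four TX completion queues it
 * owns.  With cq_id equal to the ring index, the computation reduces to:
 *
 *	irq_mask = (1 << 1)			// RSS ring 1
 *		 | (1 << 6) | (1 << 7)		// rss_ring_count(2) + 1*4 + j
 *		 | (1 << 8) | (1 << 9);
 *
 * The interrupt hot path can then test one precomputed mask instead of
 * rescanning the ring-to-vector mapping on every interrupt.
 */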

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->
						 flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}
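
/* The indirection-table fill above relies on rss_ring_count being a power
 * of two: (i & (count - 1)) is then equivalent to (i % count), spreading
 * the 1024 table entries evenly across the RSS rings.  For example, with
 * 4 rings the table repeats 0,1,2,3,0,1,2,3,...  With a non-power-of-two
 * count the mask would skip ring IDs, so the vector count granted in
 * ql_enable_msix() effectively has to cooperate with this scheme.
 */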

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
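
/* The routing table is a small fixed array of slots (the clear loop in
 * ql_clear_routing_entries() walks 16 of them).  Each
 * ql_set_routing_reg(qdev, slot, value, enable) call binds one frame class
 * (IP/TCP/UDP checksum error, broadcast, RSS match, CAM hit) to its
 * dedicated slot.  Note the error handling: any failed load still falls
 * through to 'exit', so SEM_RT_IDX_MASK is always released.
 */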

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		value |= NIC_RCV_CFG_RV;
		mask |= (NIC_RCV_CFG_RV << 16);
	}
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which the
	 * packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	end_jiffies = jiffies + usecs_to_jiffies(30);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
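
/* The reset wait above is the standard poll-with-deadline idiom: compute
 * an end time once, spin with cpu_relax() until the hardware clears the
 * bit or the deadline passes, then re-test the flag to tell the two exits
 * apart.  Generic form (illustrative, names hypothetical):
 *
 *	unsigned long end = jiffies + timeout;
 *	do {
 *		val = read_reg();
 *		if (!(val & BUSY))
 *			break;
 *		cpu_relax();
 *	} while (time_before(jiffies, end));
 *	if (val & BUSY)
 *		return -ETIMEDOUT;
 */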

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}
4094
4095static int ql_configure_rings(struct ql_adapter *qdev)
4096{
4097 int i;
4098 struct rx_ring *rx_ring;
4099 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004100 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004101 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4102 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4103
4104 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004105
Ron Mercera4ab6132009-08-27 11:02:10 +00004106 /* In a perfect world we have one RSS ring for each CPU
4107 * and each has it's own vector. To do that we ask for
4108 * cpu_cnt vectors. ql_enable_msix() will adjust the
4109 * vector count to what we actually get. We then
4110 * allocate an RSS ring for each.
4111 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004112 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004113 qdev->intr_count = cpu_cnt;
4114 ql_enable_msix(qdev);
4115 /* Adjust the RSS ring count to the actual vector count. */
4116 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004117 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004118 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004119
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004120 for (i = 0; i < qdev->tx_ring_count; i++) {
4121 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004122 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004123 tx_ring->qdev = qdev;
4124 tx_ring->wq_id = i;
4125 tx_ring->wq_len = qdev->tx_ring_size;
4126 tx_ring->wq_size =
4127 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4128
4129 /*
4130 * The completion queue ID for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004131 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004132 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004133 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004134 }

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
				rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
				rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 4;

		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
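	/* The CAM slot index below looks like a per-function offset: each
	 * PCI function appears to reserve MAX_CQ consecutive CAM entries,
	 * so function 0 writes slot 0 and function 1 writes slot MAX_CQ.
	 * This is an inference from the expression, not a documented
	 * register layout.
	 */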
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

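/* The 8012 and 8000 ASICs share this driver but use different flash
 * layouts and link bring-up sequences, so those two operations are
 * indirected through nic_operations and selected by PCI device ID in
 * ql_get_board_info() below.
 */
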
/* Find the pcie function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
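	/* For example, if this instance probed as PCI function 0 and its
	 * sibling NIC is function 1, the lower number becomes port 0 and
	 * the other becomes port 1. The port choice then selects the
	 * matching XGMAC semaphore, link-status bits and mailbox
	 * addresses below.
	 */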
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		/* Disable the device on the way out instead of leaking it. */
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_fix_features = qlge_fix_features,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};

static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
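
	/* The STS read above is the periodic MMIO access that lets EEH
	 * notice a dead bus; once the channel is reported offline we stop
	 * re-arming the timer and leave recovery to the PCI error
	 * handlers below.
	 */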
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5 * HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

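/* A sketch of the EEH recovery flow these handlers implement: the PCI
 * core calls qlge_io_error_detected() first; for a frozen channel the
 * driver detaches and asks for a slot reset, qlge_io_slot_reset()
 * re-enables the device and resets the ASIC, and qlge_io_resume()
 * reopens the interface and re-arms the watchdog timer.
 */
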
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		/* Bail out only if the down transition failed; on success
		 * we still need to arm WOL and save PCI state below.
		 */
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);