/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
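
/* For reference, the typical lock/unlock pattern around these helpers
 * (an illustrative sketch, mirroring callers such as ql_set_mac_addr()
 * below; not additional driver logic):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... access the shared MAC CAM resource ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */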

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	/* Unmap the control block even if the semaphore was never taken,
	 * so the early-return path cannot leak the DMA mapping.
	 */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
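
/* Illustrative call, patterned on the ring-bringup paths later in the
 * driver (the exact call site and names here are an assumption): download
 * a completion queue init control block with
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * CFG_LCQ selects the "load completion queue" operation and q_id tells
 * the chip which queue the block describes.
 */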

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						 MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						 MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	{
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);

		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type); /* type */
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type); /* type */
		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type); /* type */
		/* This field should also include the queue id
		 * and possibly the function id. Right now we hardcode
		 * the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	{
		u32 enable_bit = *((u32 *) &addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type | /* type */
			   enable_bit); /* enable/disable */
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
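
/* Sketch of how the MAC_ADDR_TYPE_VLAN case above is driven (illustrative
 * only; it mirrors the vlan add/kill vid handlers elsewhere in this
 * driver).  The addr argument carries just the MAC_ADDR_E enable bit and
 * the vlan id is the index:
 *
 *	u32 enable_bit = MAC_ADDR_E;	(or 0 to disable the vid)
 *	status = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
 *				     MAC_ADDR_TYPE_VLAN, vid);
 */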

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
	{
		value = RT_IDX_DST_CAM_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_IP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
	{
		value = RT_IDX_DST_RSS |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case 0:		/* Clear the E-bit on an entry. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
		    RT_IDX_TYPE_NICQ |	/* type */
		    (index << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
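
/* Example (illustrative, following the route-initialize path later in the
 * driver): enabling the broadcast entry steers all broadcast frames to the
 * default queue,
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * while ql_set_routing_reg(qdev, i, 0, 0) clears the E-bit on slot i.
 */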

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
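
/* Worked example of the precharge above: for the default (zeroth) vector,
 * irq_cnt is set to 1, so the atomic_dec_and_test() inside
 * ql_enable_completion_interrupt() takes it 1 -> 0 and the INTR_EN write
 * actually happens.  A worker still outstanding would have bumped irq_cnt
 * via the disable path first, keeping the result nonzero and the interrupt
 * masked until that worker finishes.
 */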

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
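
/* The validation above is a plain additive checksum: the 16-bit sum of
 * every little-endian word in the region, including the stored checksum
 * word, must wrap to zero.  For example, if the other words sum to 0x1234,
 * a valid image carries 0xedcc (0x10000 - 0x1234) in its checksum field.
 */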

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo (0x2580 is 9600 decimal). */
	status = ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
				    MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status = ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
	    == ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
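
/* Worked example of the chunking above (sizes illustrative): with 4K pages
 * and lbq_buf_order == 1, ql_lbq_block_size() is 8K; an lbq_buf_size of
 * 2048 carves that master block into four chunks at offsets 0x0000,
 * 0x0800, 0x1000 and 0x1800.  Every chunk but the last takes an extra page
 * reference via get_page(); the last one sets last_flag and drops
 * pg_chunk.page, and ql_get_curr_lchunk() unmaps the whole block when it
 * sees that final offset complete.
 */
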
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx=%d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
			      lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL) entries.
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
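
/* Worked example of the OAL handoff above: with frag_cnt == 10, iocb
 * segments 0-6 hold skb->data plus frags 0-5.  At frag_idx == 6, segment 7
 * is pointed at the OAL and its length becomes
 * sizeof(struct tx_buf_desc) * (10 - 6) with TX_DESC_C set (a four-entry
 * continuation list); frags 6-9 then land in oal->seg[0..3], the last
 * entry terminated with TX_DESC_E.
 */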

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;
	default:
		break;
	}
}

/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
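
/* Byte-offset view of the check above: "page" points at the start of the
 * Ethernet header, so as a u16 array tags[6] covers bytes 12-13 (the outer
 * EtherType) and tags[8] covers bytes 16-17, where a second 802.1Q
 * EtherType sits when tags are stacked.  One tag widens the MAC header by
 * VLAN_HLEN (4) bytes, two stacked tags by 8.
 */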

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}
1538/* Process an inbound completion from an rx ring. */
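/* Non-TCP frames in a page chunk: the MAC header is copied into a newly
 * allocated skb so the stack can parse it linearly, while the payload
 * stays in the page chunk as frag 0. hlen accounts for any VLAN tags
 * found by ql_update_mac_hdr_len().
 */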
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length. */
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (length > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too large, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, hlen), addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen,
			   length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring (small-buffer path). */
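/* Frames that fit entirely in one small buffer are copied into a fresh
 * skb so the small buffer can stay mapped on the ring; only DMA syncs
 * (not an unmap) bracket the copy.
 */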
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(sbq_desc, mapaddr),
				    dma_unmap_len(sbq_desc, maplen),
				    PCI_DMA_FROMDEVICE);

	memcpy(skb_put(new_skb, length), skb->data, length);

	pci_dma_sync_single_for_device(qdev->pdev,
				       dma_unmap_addr(sbq_desc, mapaddr),
				       dma_unmap_len(sbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

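/* Small receive buffers are handed to hardware with QLGE_SB_PAD bytes of
 * headroom; the helper below shifts the received data back so that the
 * IP header ends up NET_IP_ALIGN-aligned, at the cost of a copy.
 */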
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb, 32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
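/* The IOCB flags describe where the hardware placed the frame; the
 * branches below decode, in order:
 *   HV + HS - headers split into a small buffer
 *   DS      - data (also) in a small buffer
 *   DL      - data in a single large-buffer page chunk
 *   neither - data spread over several large buffers, described by an
 *             sg list that itself arrives in a small buffer
 */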
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
					      lbq_desc->p.pg_chunk.va,
					      &hlen);
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		do {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		} while (length > 0);
		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
				      &hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}

/* Process an inbound completion from an rx ring (split-frame path). */
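/* Split path: ql_build_rx_skb() has already reassembled the header and
 * data pieces into one skb; this function only validates the frame,
 * fills in checksum state, and pushes it to GRO or netif_receive_skb().
 */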
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring (dispatch). */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
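/* TX completions are reported on a completion (rx) ring too: each one
 * unmaps and frees the skb for the matching tx_ring descriptor and
 * logs any error flags before the slot is made available again.
 */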
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to indicate to the reset
	 * process that we are in a fatal error recovery rather
	 * than a normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

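/* Both completion-ring cleaners below walk the ring from the driver's
 * consumer index up to the producer index the hardware maintains in a
 * DMA'd shadow register; the rmb() keeps the IOCB contents from being
 * read ahead of the producer-index read.
 */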
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	struct tx_ring *tx_ring;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

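/* NAPI poll for MSI-X: one vector can own several TX completion rings
 * plus one RSS ring; ctx->irq_mask records which cq_ids belong to this
 * vector. Only the RSS ring counts against 'budget'.
 */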
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

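/* qlge_vlan_mode() below programs NIC_RCV_CFG: with hardware CTAG RX
 * stripping on, VLAN_MATCH_AND_NON is set alongside the VLAN mask,
 * which (judging by the flag name) accepts both matching-tagged and
 * untagged frames; with stripping off only the mask is written.
 */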
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

/**
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status = 0;
	bool need_restart = netif_running(ndev);

	if (need_restart) {
		status = ql_adapter_down(qdev);
		if (status) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring down the adapter\n");
			return status;
		}
	}

	/* update the features with the recent change */
	ndev->features = features;

	if (need_restart) {
		status = ql_adapter_up(qdev);
		if (status) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring up the adapter\n");
			return status;
		}
	}

	return status;
}

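/* Changing hardware vlan acceleration needs a full adapter restart, so
 * qlge hooks .ndo_fix_features and applies the change there, before the
 * core commits the new feature word.
 */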
static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	int err;

	/* Update the behavior of vlan accel in the adapter */
	err = qlge_update_hw_vlan_features(ndev, features);
	if (err)
		return err;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

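/* The __qlge_vlan_rx_add_vid()/__qlge_vlan_rx_kill_vid() pair each
 * program one VLAN CAM entry; callers must already hold the MAC address
 * hardware semaphore (SEM_MAC_ADDR_MASK), which the wrappers without
 * the leading underscores acquire and release.
 */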
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

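/* Each MSI-X vector's handler just schedules the owning ring's NAPI
 * context; all the real work happens in ql_napi_poll_msix() above.
 */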
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

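/* ql_tso() returns 1 when the skb has been set up for hardware LSO, 0
 * for non-GSO skbs, or a negative errno from skb_cow_head(). The TCP
 * pseudo-header checksum is seeded here so the chip can complete it.
 */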
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		__be16 l3_proto = vlan_get_protocol(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(l3_proto == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

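/* For CHECKSUM_PARTIAL frames that are not TSO, the IOCB carries the
 * header offsets plus a seeded pseudo-header checksum. Note the helper
 * below sets OB_MAC_TSO_IOCB_IP4 unconditionally, so it is only correct
 * for IPv4 TCP/UDP frames.
 */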
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

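/* .ndo_start_xmit: one IOCB per frame. The queue is stopped whenever
 * fewer than two descriptors remain free, both on entry and again after
 * posting, so the next frame cannot overrun the work queue.
 */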
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (skb_vlan_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

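/* The shadow register area is a DMA page shared with the chip: hardware
 * writes ring producer indices into it, which lets the driver poll them
 * with plain memory reads instead of MMIO.
 */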
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
				      &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}

	qdev->tx_ring_shadow_reg_area =
		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
				      &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

2774static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2775{
2776 struct tx_ring_desc *tx_ring_desc;
2777 int i;
2778 struct ob_mac_iocb_req *mac_iocb_ptr;
2779
2780 mac_iocb_ptr = tx_ring->wq_base;
2781 tx_ring_desc = tx_ring->q;
2782 for (i = 0; i < tx_ring->wq_len; i++) {
2783 tx_ring_desc->index = i;
2784 tx_ring_desc->skb = NULL;
2785 tx_ring_desc->queue_entry = mac_iocb_ptr;
2786 mac_iocb_ptr++;
2787 tx_ring_desc++;
2788 }
2789 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002790}
2791
2792static void ql_free_tx_resources(struct ql_adapter *qdev,
2793 struct tx_ring *tx_ring)
2794{
2795 if (tx_ring->wq_base) {
2796 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2797 tx_ring->wq_base, tx_ring->wq_base_dma);
2798 tx_ring->wq_base = NULL;
2799 }
2800 kfree(tx_ring->q);
2801 tx_ring->q = NULL;
2802}
2803
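/*
 * The WQ_ADDR_ALIGN test below rejects a work queue whose DMA base has
 * any of the alignment bits set, presumably a hardware constraint on
 * where the chip can address the ring; an unaligned allocation is
 * treated the same as an allocation failure.
 */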
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}

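/*
 * Large RX buffers are carved out of higher-order page chunks.  Every
 * descriptor holds a reference on its page, but only the descriptor
 * flagged as the chunk's last user (last_flag) owns the DMA mapping,
 * so pci_unmap_page() runs once per chunk while put_page() runs per
 * buffer.
 */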
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
	if (rx_ring->pg_chunk.page) {
		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
		put_page(rx_ring->pg_chunk.page);
		rx_ring->pg_chunk.page = NULL;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->sbq == NULL)
			goto err_mem;

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->lbq == NULL)
			goto err_mem;

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

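	/*
	 * Per-CQ shadow layout within RX_RING_SHADOW_SPACE, as carved
	 * above: an 8-byte producer-index shadow, then the lbq page
	 * indirection list, then the sbq page indirection list.
	 */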
	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
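	/*
	 * The buffer queues are handed to the chip as lists of
	 * DB_PAGE_SIZE pages: the do/while loops below split each
	 * queue's contiguous DMA block into page-sized entries in the
	 * indirection list, MAX_DB_PAGES_PER_BQ() entries in total.
	 */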
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

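/*
 * Interrupt setup falls back in order: MSI-X, then MSI, then legacy
 * INTx, downgrading qlge_irq_type at each failed step so a later
 * retry does not revisit a mode the hardware or kernel already
 * refused.
 */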
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
					    1, qdev->intr_count);
		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qlge_irq_type = MSI_IRQ;
		} else {
			qdev->intr_count = err;
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

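	/*
	 * Worked example (assuming the 2-vector/8-TX-ring layout from
	 * the ql_set_tx_vect() comment, so tx_rings_per_vector is 4 and
	 * the TX completion queues occupy cq_ids 2-9): vector 0's mask
	 * sets bits 0 and 2-5, vector 1's mask sets bits 1 and 6-9.
	 */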
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events, in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED, &qdev->flags)
					? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
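	/*
	 * Each of the 1024 indirection entries maps a hash value to a
	 * completion queue.  Note that the mask trick below only spreads
	 * entries evenly when rss_ring_count is a power of two; e.g.
	 * with 4 RSS rings the table repeats the pattern 0,1,2,3.
	 */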
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine whether we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		value |= NIC_RCV_CFG_RV;
		mask |= (NIC_RCV_CFG_RV << 16);
	}
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/pci function on which the
	 * packet arrived, in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	/* If the recovery bit is set, skip the mailbox command and
	 * just clear the bit; otherwise we are in the normal reset
	 * process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else {
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
	}

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

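	/*
	 * Poll for the function-reset bit to clear.  Note that
	 * usecs_to_jiffies(30) rounds up to a full jiffy, so the real
	 * bound is one timer tick rather than literally 30 microseconds.
	 */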
	end_jiffies = jiffies + usecs_to_jiffies(30);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else {
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);
		}

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

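/*
 * Teardown runs strictly in this order: drop the link, cancel the
 * worker threads, quiesce NAPI, mark the adapter down, mask interrupts,
 * reclaim in-flight TX skbs, soft-reset the chip, and only then free
 * the RX buffers, so nothing can touch a buffer after the reset.
 */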
static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up, then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);
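	/*
	 * Jumbo MTUs switch the large buffers from LARGE_BUFFER_MIN_SIZE
	 * to LARGE_BUFFER_MAX_SIZE chunks; get_order() converts that
	 * byte size into the page-allocation order used for lbq page
	 * chunks (e.g. an 8KB buffer is an order-1 allocation on
	 * 4KB-page systems).
	 */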

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

4147 for (i = 0; i < qdev->rx_ring_count; i++) {
4148 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004149 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004150 rx_ring->qdev = qdev;
4151 rx_ring->cq_id = i;
4152 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004153 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004154 /*
4155 * Inbound (RSS) queues.
4156 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004157 rx_ring->cq_len = qdev->rx_ring_size;
4158 rx_ring->cq_size =
4159 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4160 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4161 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004162 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004163 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004164 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4165 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004166 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004167 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004168 rx_ring->type = RX_Q;
4169 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004170 /*
4171 * Outbound queue handles outbound completions only.
4172 */
4173 /* outbound cq is same size as tx_ring it services. */
4174 rx_ring->cq_len = qdev->tx_ring_size;
4175 rx_ring->cq_size =
4176 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4177 rx_ring->lbq_len = 0;
4178 rx_ring->lbq_size = 0;
4179 rx_ring->lbq_buf_size = 0;
4180 rx_ring->sbq_len = 0;
4181 rx_ring->sbq_size = 0;
4182 rx_ring->sbq_buf_size = 0;
4183 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004184 }
4185 }
4186 return 0;
4187}
4188
4189static int qlge_open(struct net_device *ndev)
4190{
4191 int err = 0;
4192 struct ql_adapter *qdev = netdev_priv(ndev);
4193
Ron Mercer74e12432009-11-11 12:54:04 +00004194 err = ql_adapter_reset(qdev);
4195 if (err)
4196 return err;
4197
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004198 err = ql_configure_rings(qdev);
4199 if (err)
4200 return err;
4201
4202 err = ql_get_adapter_resources(qdev);
4203 if (err)
4204 goto error_up;
4205
4206 err = ql_adapter_up(qdev);
4207 if (err)
4208 goto error_up;
4209
4210 return err;
4211
4212error_up:
4213 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004214 return err;
4215}
4216
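/* Resize the large receive buffers after an MTU change: wait out any
 * in-flight reset, bring the adapter down, recompute lbq_buf_size for
 * every RSS ring, then bring the adapter back up.
 */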
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 4;

		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

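/* The hardware supports exactly two MTUs, 1500 (normal) and 9000 (jumbo);
 * any other value is rejected with -EINVAL. For instance, from userspace
 * (interface name illustrative only):
 *
 *	ip link set dev eth0 mtu 9000
 */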
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else {
		return -EINVAL;
	}

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");

	return status;
}

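/* Fold the per-ring software counters into the single net_device_stats
 * block: RX totals come from the RSS rings, TX totals from the TX rings.
 */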
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

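/* Runs under the routing-index semaphore. The promiscuous and all-multi
 * routing slots are rewritten only on a flag transition; an explicit
 * multicast list of up to MAX_MULTICAST_ENTRIES addresses is loaded into
 * the MAC address registers and enabled via the multicast-match slot.
 */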
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

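/* Derive this function's identity from the STS register: the lower of
 * the two NIC function numbers becomes port 0, the higher port 1, and
 * each port gets its own XGMAC semaphore mask, link/init status bits,
 * and MPI mailbox addresses.
 */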
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

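/* Five-second watchdog: reading STS on a dead PCI bus is what lets the
 * EEH core notice the failure; the timer rearms itself unless the
 * channel has already gone offline.
 */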
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5 * HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

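/* Thin non-static wrappers around the send and RX-clean paths. These
 * appear to exist so the ethtool loopback self-test, built in a separate
 * file, can reach the static qlge_send()/ql_clean_inbound_rx_ring().
 */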
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

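/* Suspend path: detach the netdev and stop the EEH watchdog, bring the
 * adapter down if it was running, arm wake-on-LAN, then save PCI state
 * and power the device down.
 */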
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);