/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/*	NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
/*	NETIF_MSG_TX_QUEUED | */
/*	NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/*	NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. "
		 "Default is OFF - do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. "
		 "Default is OFF - do not allow.");

static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
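
/* Usage sketch: callers bracket access to a shared resource with the
 * semaphore, as the flash read paths below do:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */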

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
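
/* Sketch of a typical caller: the ring bring-up code (later in this
 * file) downloads a completion-queue init control block roughly as
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * where cqicb points at the rx ring's control block.
 */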

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status = ql_wait_reg_rdy(qdev,
						 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						 MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	{
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);

		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type);	/* type */
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type);	/* type */
		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type);	/* type */
		/* This field should also include the queue id
		 * and possibly the function id.  Right now we hardcode
		 * the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	{
		u32 enable_bit = *((u32 *) &addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type |	/* type */
			   enable_bit);	/* enable/disable */
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
	{
		value = RT_IDX_DST_CAM_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_IP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_TCP_UDP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
	{
		value = RT_IDX_DST_RSS |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case 0:			/* Clear the E-bit on an entry. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(index << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
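
/* Illustrative use (the filter setup paths later in this file make
 * calls of this shape), e.g. routing broadcast frames to the default
 * queue:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 */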

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	/* A valid image checksums to zero, so the sum doubles as the
	 * error return.
	 */
	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
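
/* Sketch of a caller (the ethtool statistics path reads the XGMAC
 * counters this way; the register argument below is illustrative):
 *
 *	u64 frames;
 *
 *	if (ql_read_xgmac_reg64(qdev, some_stat_reg, &frames))
 *		goto quit;
 *
 * Note the low dword is read at 'reg' and the high dword at 'reg + 4',
 * so 'reg' must be the base of a 64-bit counter pair.
 */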

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status = ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
				    MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status = ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
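
/* For example, with 4 KiB pages and lbq_buf_order == 1, each master
 * "chunk" page is 8 KiB; ql_get_next_chunk() below carves it into
 * lbq_buf_size sized receive buffers.
 */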

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
	    == ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
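
/* These two helpers are paired in the completion paths: the NAPI poll
 * loop advances the consumer index with ql_update_cq() for each
 * response entry it handles, then rings the doorbell once with
 * ql_write_cq_idx() to tell the chip how far it has read.
 */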

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;

		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
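
/* Refcounting note: every chunk except the last takes an extra
 * get_page() reference, so each outstanding descriptor holds its own
 * reference to the master page.  The DMA mapping is torn down when the
 * last chunk is consumed (see ql_get_curr_lchunk()), and the page
 * itself is freed once the receive paths put_page() all the chunks.
 */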

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx=%d.\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
			      lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen),
				       PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be 1
	 * (only the skb->data area was mapped).  Passing map_idx in
	 * tells ql_unmap_send() how many entries were mapped
	 * successfully so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
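
/* A sketch of how the send path (later in this file) is expected to
 * react: on NETDEV_TX_BUSY it abandons the IOCB and returns busy to
 * the stack for a retry; no unmapping is needed by the caller because
 * ql_unmap_send() above has already released whatever was mapped.
 */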

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		/* fall through */
	default:
		break;
	}
}

/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
1490
Ron Mercer4f848c02010-01-02 10:37:43 +00001491/* Process an inbound completion from an rx ring. */
Ron Mercer63526712010-01-02 10:37:44 +00001492static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497{
1498 struct sk_buff *skb;
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercer63526712010-01-02 10:37:44 +00001500 struct napi_struct *napi = &rx_ring->napi;
1501
Sritej Velagaae721f32013-04-18 19:49:52 +00001502 /* Frame error, so drop the packet. */
1503 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505 put_page(lbq_desc->p.pg_chunk.page);
1506 return;
1507 }
Ron Mercer63526712010-01-02 10:37:44 +00001508 napi->dev = qdev->ndev;
1509
1510 skb = napi_get_frags(napi);
1511 if (!skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001512 netif_err(qdev, drv, qdev->ndev,
1513 "Couldn't get an skb, exiting.\n");
Ron Mercer63526712010-01-02 10:37:44 +00001514 rx_ring->rx_dropped++;
1515 put_page(lbq_desc->p.pg_chunk.page);
1516 return;
1517 }
1518 prefetch(lbq_desc->p.pg_chunk.va);
Ian Campbellda7ebfd2011-08-31 00:47:05 +00001519 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520 lbq_desc->p.pg_chunk.page,
1521 lbq_desc->p.pg_chunk.offset,
1522 length);
Ron Mercer63526712010-01-02 10:37:44 +00001523
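	/* __skb_fill_page_desc() attaches the chunk but does not touch
	 * the skb accounting, so grow len/data_len/truesize by hand.
	 */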
1524 skb->len += length;
1525 skb->data_len += length;
1526 skb->truesize += length;
1527 skb_shinfo(skb)->nr_frags++;
1528
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += length;
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001533 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001535 napi_gro_frags(napi);
Ron Mercer63526712010-01-02 10:37:44 +00001536}
1537
1538/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001539static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 struct rx_ring *rx_ring,
1541 struct ib_mac_iocb_rsp *ib_mac_rsp,
1542 u32 length,
1543 u16 vlan_id)
1544{
1545 struct net_device *ndev = qdev->ndev;
1546 struct sk_buff *skb = NULL;
1547 void *addr;
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549 struct napi_struct *napi = &rx_ring->napi;
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001550 size_t hlen = ETH_HLEN;
Ron Mercer4f848c02010-01-02 10:37:43 +00001551
1552 skb = netdev_alloc_skb(ndev, length);
1553 if (!skb) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001554 rx_ring->rx_dropped++;
1555 put_page(lbq_desc->p.pg_chunk.page);
1556 return;
1557 }
1558
1559 addr = lbq_desc->p.pg_chunk.va;
1560 prefetch(addr);
1561
Sritej Velagaae721f32013-04-18 19:49:52 +00001562 /* Frame error, so drop the packet. */
1563 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565 goto err_out;
1566 }
1567
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001568	/* Update the MAC header length */
1569 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
Ron Mercer4f848c02010-01-02 10:37:43 +00001571 /* The max framesize filter on this chip is set higher than
1572 * MTU since FCoE uses 2k frames.
1573 */
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001574	if (length > ndev->mtu + hlen) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001575		netif_err(qdev, drv, qdev->ndev,
1576			  "Frame too long, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001577 rx_ring->rx_dropped++;
1578 goto err_out;
1579 }
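	/* Copy only the MAC header into the linear area; the payload
	 * stays in the page chunk and is attached as a frag below.
	 */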
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001580 memcpy(skb_put(skb, hlen), addr, hlen);
Joe Perchesae9540f72010-02-09 11:49:52 +00001581 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001585 lbq_desc->p.pg_chunk.offset + hlen,
1586 length - hlen);
1587 skb->len += length - hlen;
1588 skb->data_len += length - hlen;
1589 skb->truesize += length - hlen;
Ron Mercer4f848c02010-01-02 10:37:43 +00001590
1591 rx_ring->rx_packets++;
1592 rx_ring->rx_bytes += skb->len;
1593 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001594 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001595
Michał Mirosław88230fd2011-04-18 13:31:21 +00001596 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001597 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598 /* TCP frame. */
1599 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001600 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001602 skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605 /* Unfragmented ipv4 UDP frame. */
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001606 struct iphdr *iph =
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001607 (struct iphdr *)((u8 *)addr + hlen);
Ron Mercer4f848c02010-01-02 10:37:43 +00001608 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001609 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001610 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001611 netif_printk(qdev, rx_status, KERN_DEBUG,
1612 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001613 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001614 }
1615 }
1616 }
1617
1618 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001619 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001620 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001621 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622 napi_gro_receive(napi, skb);
1623 else
1624 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001625 return;
1626err_out:
1627 dev_kfree_skb_any(skb);
1628 put_page(lbq_desc->p.pg_chunk.page);
1629}
1630
1631/* Process an inbound completion from an rx ring. */
1632static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633 struct rx_ring *rx_ring,
1634 struct ib_mac_iocb_rsp *ib_mac_rsp,
1635 u32 length,
1636 u16 vlan_id)
1637{
1638 struct net_device *ndev = qdev->ndev;
1639 struct sk_buff *skb = NULL;
1640 struct sk_buff *new_skb = NULL;
1641 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643 skb = sbq_desc->p.skb;
1644 /* Allocate new_skb and copy */
1645 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646 if (new_skb == NULL) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001647 rx_ring->rx_dropped++;
1648 return;
1649 }
1650 skb_reserve(new_skb, NET_IP_ALIGN);
1651 memcpy(skb_put(new_skb, length), skb->data, length);
1652 skb = new_skb;
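	/* The sbq skb stays in the ring for reuse; everything from
	 * here on operates on the private copy.
	 */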
1653
Sritej Velagaae721f32013-04-18 19:49:52 +00001654 /* Frame error, so drop the packet. */
1655 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1656 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1657 dev_kfree_skb_any(skb);
1658 return;
1659 }
1660
Ron Mercer4f848c02010-01-02 10:37:43 +00001661 /* loopback self test for ethtool */
1662 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1663 ql_check_lb_frame(qdev, skb);
1664 dev_kfree_skb_any(skb);
1665 return;
1666 }
1667
1668 /* The max framesize filter on this chip is set higher than
1669 * MTU since FCoE uses 2k frames.
1670 */
1671 if (skb->len > ndev->mtu + ETH_HLEN) {
1672 dev_kfree_skb_any(skb);
1673 rx_ring->rx_dropped++;
1674 return;
1675 }
1676
1677 prefetch(skb->data);
Ron Mercer4f848c02010-01-02 10:37:43 +00001678 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001679 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1680 "%s Multicast.\n",
1681 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1682 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1683 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1684 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1685 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1686 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001687 }
1688 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f72010-02-09 11:49:52 +00001689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001691
1692 rx_ring->rx_packets++;
1693 rx_ring->rx_bytes += skb->len;
1694 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001695 skb_checksum_none_assert(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001696
1697 /* If rx checksum is on, and there are no
1698 * csum or frame errors.
1699 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00001700 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercer4f848c02010-01-02 10:37:43 +00001701 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1702 /* TCP frame. */
1703 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001704 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001706 skb->ip_summed = CHECKSUM_UNNECESSARY;
1707 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1708 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1709 /* Unfragmented ipv4 UDP frame. */
1710 struct iphdr *iph = (struct iphdr *) skb->data;
1711 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00001712 htons(IP_MF|IP_OFFSET))) {
Ron Mercer4f848c02010-01-02 10:37:43 +00001713 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00001714 netif_printk(qdev, rx_status, KERN_DEBUG,
1715 qdev->ndev,
Jitendra Kalsariae02ef3312012-02-03 14:06:49 +00001716 "UDP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001717 }
1718 }
1719 }
1720
1721 skb_record_rx_queue(skb, rx_ring->cq_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001722 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001723 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00001724 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1725 napi_gro_receive(&rx_ring->napi, skb);
1726 else
1727 netif_receive_skb(skb);
Ron Mercer4f848c02010-01-02 10:37:43 +00001728}
1729
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001730static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001731{
1732 void *temp_addr = skb->data;
1733
1734 /* Undo the skb_reserve(skb,32) we did before
1735 * giving to hardware, and realign data on
1736 * a 2-byte boundary.
1737 */
1738 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1739 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1740 skb_copy_to_linear_data(skb, temp_addr,
1741 (unsigned int)len);
1742}
1743
1744/*
1745 * This function builds an skb for the given inbound
1746 * completion. It will be rewritten for readability in the near
1747 * future, but for now it works well.
1748 */
1749static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1750 struct rx_ring *rx_ring,
1751 struct ib_mac_iocb_rsp *ib_mac_rsp)
1752{
1753 struct bq_desc *lbq_desc;
1754 struct bq_desc *sbq_desc;
1755 struct sk_buff *skb = NULL;
1756 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001757 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1758 size_t hlen = ETH_HLEN;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001759
1760 /*
1761 * Handle the header buffer if present.
1762 */
1763 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1764 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001767 /*
1768 * Headers fit nicely into a small buffer.
1769 */
1770 sbq_desc = ql_get_curr_sbuf(rx_ring);
1771 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001772 dma_unmap_addr(sbq_desc, mapaddr),
1773 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001774 PCI_DMA_FROMDEVICE);
1775 skb = sbq_desc->p.skb;
1776 ql_realign_skb(skb, hdr_len);
1777 skb_put(skb, hdr_len);
1778 sbq_desc->p.skb = NULL;
1779 }
1780
1781 /*
1782 * Handle the data buffer(s).
1783 */
1784 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f72010-02-09 11:49:52 +00001785 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001787 return skb;
1788 }
1789
1790 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 "Headers in small, data of %d bytes in small, combine them.\n",
1794 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001795 /*
1796 * Data is less than small buffer size so it's
1797 * stuffed in a small buffer.
1798 * For this case we append the data
1799 * from the "data" small buffer to the "header" small
1800 * buffer.
1801 */
1802 sbq_desc = ql_get_curr_sbuf(rx_ring);
1803 pci_dma_sync_single_for_cpu(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001804 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 (sbq_desc, mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001806 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001807 (sbq_desc, maplen),
1808 PCI_DMA_FROMDEVICE);
1809 memcpy(skb_put(skb, length),
1810 sbq_desc->p.skb->data, length);
1811 pci_dma_sync_single_for_device(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001812 dma_unmap_addr
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001813 (sbq_desc,
1814 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001815 dma_unmap_len
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001816 (sbq_desc,
1817 maplen),
1818 PCI_DMA_FROMDEVICE);
1819 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00001820 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 "%d bytes in a single small buffer.\n",
1822 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001823 sbq_desc = ql_get_curr_sbuf(rx_ring);
1824 skb = sbq_desc->p.skb;
1825 ql_realign_skb(skb, length);
1826 skb_put(skb, length);
1827 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001828 dma_unmap_addr(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001829 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001830 dma_unmap_len(sbq_desc,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001831 maplen),
1832 PCI_DMA_FROMDEVICE);
1833 sbq_desc->p.skb = NULL;
1834 }
1835 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1836 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001837 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1838 "Header in small, %d bytes in large. Chain large to small!\n",
1839 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001840 /*
1841 * The data is in a single large buffer. We
1842 * chain it to the header buffer's skb and let
1843 * it rip.
1844 */
Ron Mercer7c734352009-10-19 03:32:19 +00001845 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f72010-02-09 11:49:52 +00001846 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1847 "Chaining page at offset = %d, for %d bytes to skb.\n",
1848 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001849 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1850 lbq_desc->p.pg_chunk.offset,
1851 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001852 skb->len += length;
1853 skb->data_len += length;
1854 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001855 } else {
1856 /*
1857 * The headers and data are in a single large buffer. We
1858 * copy it to a new skb and let it go. This can happen with
1859 * jumbo mtu on a non-TCP/UDP frame.
1860 */
Ron Mercer7c734352009-10-19 03:32:19 +00001861 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001862 skb = netdev_alloc_skb(qdev->ndev, length);
1863 if (skb == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001864 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1865 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001866 return NULL;
1867 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001868 pci_unmap_page(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001869 dma_unmap_addr(lbq_desc,
Ron Mercer4055c7d42009-01-04 17:07:09 -08001870 mapaddr),
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001871 dma_unmap_len(lbq_desc, maplen),
Ron Mercer4055c7d42009-01-04 17:07:09 -08001872 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001873 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f72010-02-09 11:49:52 +00001874 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1875 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1876 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001877 skb_fill_page_desc(skb, 0,
1878 lbq_desc->p.pg_chunk.page,
1879 lbq_desc->p.pg_chunk.offset,
1880 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001881 skb->len += length;
1882 skb->data_len += length;
1883 skb->truesize += length;
1884 length -= length;
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001885 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1886 lbq_desc->p.pg_chunk.va,
1887 &hlen);
1888 __pskb_pull_tail(skb, hlen);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001889 }
1890 } else {
1891 /*
1892 * The data is in a chain of large buffers
1893 * pointed to by a small buffer. We loop
1894	 * through and chain them to our small header
1895 * buffer's skb.
1896 * frags: There are 18 max frags and our small
1897 * buffer will hold 32 of them. The thing is,
1898 * we'll use 3 max for our 9000 byte jumbo
1899 * frames. If the MTU goes up we could
1900 * eventually be in trouble.
1901 */
Ron Mercer7c734352009-10-19 03:32:19 +00001902 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001903 sbq_desc = ql_get_curr_sbuf(rx_ring);
1904 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00001905 dma_unmap_addr(sbq_desc, mapaddr),
1906 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001907 PCI_DMA_FROMDEVICE);
1908 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1909 /*
1910			 * This is a non-TCP/UDP IP frame, so
1911 * the headers aren't split into a small
1912 * buffer. We have to use the small buffer
1913 * that contains our sg list as our skb to
1914 * send upstairs. Copy the sg list here to
1915 * a local buffer and use it to find the
1916 * pages to chain.
1917 */
Joe Perchesae9540f72010-02-09 11:49:52 +00001918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 "%d bytes of headers & data in chain of large.\n",
1920 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001921 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001922 sbq_desc->p.skb = NULL;
1923 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001924 }
1925 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001926 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1927 size = (length < rx_ring->lbq_buf_size) ? length :
1928 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001929
Joe Perchesae9540f72010-02-09 11:49:52 +00001930 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1931 "Adding page %d to skb for %d bytes.\n",
1932 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001933 skb_fill_page_desc(skb, i,
1934 lbq_desc->p.pg_chunk.page,
1935 lbq_desc->p.pg_chunk.offset,
1936 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001937 skb->len += size;
1938 skb->data_len += size;
1939 skb->truesize += size;
1940 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001941 i++;
1942 }
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04001943 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1944 &hlen);
1945 __pskb_pull_tail(skb, hlen);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001946 }
1947 return skb;
1948}
1949
1950/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001951static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001952 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001953 struct ib_mac_iocb_rsp *ib_mac_rsp,
1954 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001955{
1956 struct net_device *ndev = qdev->ndev;
1957 struct sk_buff *skb = NULL;
1958
1959 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1960
1961 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1962 if (unlikely(!skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001963 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1964 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001965 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001966 return;
1967 }
1968
Sritej Velagaae721f32013-04-18 19:49:52 +00001969 /* Frame error, so drop the packet. */
1970 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1971 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1972 dev_kfree_skb_any(skb);
1973 return;
1974 }
1975
Ron Mercerec33a492009-06-09 05:39:28 +00001976 /* The max framesize filter on this chip is set higher than
1977 * MTU since FCoE uses 2k frames.
1978 */
1979 if (skb->len > ndev->mtu + ETH_HLEN) {
1980 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001981 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001982 return;
1983 }
1984
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001985 /* loopback self test for ethtool */
1986 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1987 ql_check_lb_frame(qdev, skb);
1988 dev_kfree_skb_any(skb);
1989 return;
1990 }
1991
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001992 prefetch(skb->data);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001993 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00001994 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1995 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1996 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1997 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1998 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1999 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2000 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00002001 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002002 }
2003 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002004 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2005 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002006 }
Ron Mercerd555f592009-03-09 10:59:19 +00002007
Ron Mercerd555f592009-03-09 10:59:19 +00002008 skb->protocol = eth_type_trans(skb, ndev);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07002009 skb_checksum_none_assert(skb);
Ron Mercerd555f592009-03-09 10:59:19 +00002010
2011 /* If rx checksum is on, and there are no
2012 * csum or frame errors.
2013 */
Michał Mirosław88230fd2011-04-18 13:31:21 +00002014 if ((ndev->features & NETIF_F_RXCSUM) &&
Ron Mercerd555f592009-03-09 10:59:19 +00002015 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2016 /* TCP frame. */
2017 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2019 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002020 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2022 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2023 /* Unfragmented ipv4 UDP frame. */
2024 struct iphdr *iph = (struct iphdr *) skb->data;
2025 if (!(iph->frag_off &
Li RongQing0d653ed82012-07-09 22:02:42 +00002026 htons(IP_MF|IP_OFFSET))) {
Ron Mercerd555f592009-03-09 10:59:19 +00002027 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f72010-02-09 11:49:52 +00002028 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2029 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002030 }
2031 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002032 }
Ron Mercerd555f592009-03-09 10:59:19 +00002033
Ron Mercer885ee392009-11-03 13:49:31 +00002034 rx_ring->rx_packets++;
2035 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002036 skb_record_rx_queue(skb, rx_ring->cq_id);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002037 if (vlan_id != 0xffff)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002038 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002039 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2040 napi_gro_receive(&rx_ring->napi, skb);
2041 else
2042 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002043}
2044
Ron Mercer4f848c02010-01-02 10:37:43 +00002045/* Process an inbound completion from an rx ring. */
2046static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047 struct rx_ring *rx_ring,
2048 struct ib_mac_iocb_rsp *ib_mac_rsp)
2049{
2050 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002051 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2052 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
Ron Mercer4f848c02010-01-02 10:37:43 +00002053 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2054 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
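	/* vlan_id stays 0xffff (no tag) unless the frame carried a VLAN
	 * header and hardware tag stripping is enabled.
	 */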
2055
2056 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2057
2058 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2059 /* The data and headers are split into
2060 * separate buffers.
2061 */
2062 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2063 vlan_id);
2064 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2065 /* The data fit in a single small buffer.
2066 * Allocate a new skb, copy the data and
2067 * return the buffer to the free pool.
2068 */
2069 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2070 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002071 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2072 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2073 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2074 /* TCP packet in a page chunk that's been checksummed.
2075 * Tack it on to our GRO skb and let it go.
2076 */
2077 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2078 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002079 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2080 /* Non-TCP packet in a page chunk. Allocate an
2081 * skb, tack it on frags, and send it up.
2082 */
2083 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2084 length, vlan_id);
2085 } else {
Ron Mercerc0c56952010-02-17 06:41:21 +00002086 /* Non-TCP/UDP large frames that span multiple buffers
2087	 * can be processed correctly by the split frame logic.
2088 */
2089 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2090 vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002091 }
2092
2093 return (unsigned long)length;
2094}
2095
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002096/* Process an outbound completion from an rx ring. */
2097static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2098 struct ob_mac_iocb_rsp *mac_rsp)
2099{
2100 struct tx_ring *tx_ring;
2101 struct tx_ring_desc *tx_ring_desc;
2102
2103 QL_DUMP_OB_MAC_RSP(mac_rsp);
2104 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2106 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002107 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2108 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002109 dev_kfree_skb(tx_ring_desc->skb);
2110 tx_ring_desc->skb = NULL;
2111
2112 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2113 OB_MAC_IOCB_RSP_S |
2114 OB_MAC_IOCB_RSP_L |
2115 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2116 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002117 netif_warn(qdev, tx_done, qdev->ndev,
2118 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002119 }
2120 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002121 netif_warn(qdev, tx_done, qdev->ndev,
2122 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002123 }
2124 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002125 netif_warn(qdev, tx_done, qdev->ndev,
2126 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002127 }
2128 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002129 netif_warn(qdev, tx_done, qdev->ndev,
2130 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002131 }
2132 }
2133 atomic_inc(&tx_ring->tx_count);
2134}
2135
2136/* Fire up a handler to reset the MPI processor. */
2137void ql_queue_fw_error(struct ql_adapter *qdev)
2138{
Ron Mercer6a473302009-07-02 06:06:12 +00002139 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002140 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2141}
2142
2143void ql_queue_asic_error(struct ql_adapter *qdev)
2144{
Ron Mercer6a473302009-07-02 06:06:12 +00002145 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002146 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002147 /* Clear adapter up bit to signal the recovery
2148 * process that it shouldn't kill the reset worker
2149 * thread
2150 */
2151 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Jitendra Kalsariada92b392011-06-30 10:02:05 +00002152	/* Set the asic recovery bit to tell the reset process that we
2153	 * are in fatal error recovery rather than a normal close
2154	 */
2155 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002156 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2157}
2158
2159static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2160 struct ib_ae_iocb_rsp *ib_ae_rsp)
2161{
2162 switch (ib_ae_rsp->event) {
2163 case MGMT_ERR_EVENT:
Joe Perchesae9540f72010-02-09 11:49:52 +00002164 netif_err(qdev, rx_err, qdev->ndev,
2165 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 ql_queue_fw_error(qdev);
2167 return;
2168
2169 case CAM_LOOKUP_ERR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002170 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2171 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002172 ql_queue_asic_error(qdev);
2173 return;
2174
2175 case SOFT_ECC_ERROR_EVENT:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002176 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002177 ql_queue_asic_error(qdev);
2178 break;
2179
2180 case PCI_ERR_ANON_BUF_RD:
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002181 netdev_err(qdev->ndev, "PCI error occurred when reading "
2182 "anonymous buffers from rx_ring %d.\n",
2183 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002184 ql_queue_asic_error(qdev);
2185 break;
2186
2187 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002188 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2189 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002190 ql_queue_asic_error(qdev);
2191 break;
2192 }
2193}
2194
2195static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2196{
2197 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002198 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002199 struct ob_mac_iocb_rsp *net_rsp = NULL;
2200 int count = 0;
2201
Ron Mercer1e213302009-03-09 10:59:21 +00002202 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002203 /* While there are entries in the completion queue. */
2204 while (prod != rx_ring->cnsmr_idx) {
2205
Joe Perchesae9540f72010-02-09 11:49:52 +00002206 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2208 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002209
2210 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
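		/* Don't let reads of the IOCB contents be speculated
		 * before the producer index read above; rmb() orders
		 * the two.
		 */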
2211 rmb();
2212 switch (net_rsp->opcode) {
2213
2214 case OPCODE_OB_MAC_TSO_IOCB:
2215 case OPCODE_OB_MAC_IOCB:
2216 ql_process_mac_tx_intr(qdev, net_rsp);
2217 break;
2218 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002219 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2220 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2221 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002222 }
2223 count++;
2224 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002225 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002226 }
Dan Carpenter4da79502010-08-19 08:52:44 +00002227 if (!net_rsp)
2228 return 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002229 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002230 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
Dan Carpenter4da79502010-08-19 08:52:44 +00002231 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Jitendra Kalsariad0de7302012-07-10 14:57:32 +00002232 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002233 /*
2234 * The queue got stopped because the tx_ring was full.
2235 * Wake it up, because it's now at least 25% empty.
2236 */
Ron Mercer1e213302009-03-09 10:59:21 +00002237 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002238 }
2239
2240 return count;
2241}
2242
2243static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2244{
2245 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002246 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002247 struct ql_net_rsp_iocb *net_rsp;
2248 int count = 0;
2249
2250 /* While there are entries in the completion queue. */
2251 while (prod != rx_ring->cnsmr_idx) {
2252
Joe Perchesae9540f72010-02-09 11:49:52 +00002253 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2254 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2255 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002256
2257 net_rsp = rx_ring->curr_entry;
2258 rmb();
2259 switch (net_rsp->opcode) {
2260 case OPCODE_IB_MAC_IOCB:
2261 ql_process_mac_rx_intr(qdev, rx_ring,
2262 (struct ib_mac_iocb_rsp *)
2263 net_rsp);
2264 break;
2265
2266 case OPCODE_IB_AE_IOCB:
2267 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2268 net_rsp);
2269 break;
2270 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00002271 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2272 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2273 net_rsp->opcode);
2274 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002275 }
2276 count++;
2277 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002278 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002279 if (count == budget)
2280 break;
2281 }
2282 ql_update_buffer_queues(qdev, rx_ring);
2283 ql_write_cq_idx(rx_ring);
2284 return count;
2285}
2286
2287static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2288{
2289 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2290 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002291 struct rx_ring *trx_ring;
2292 int i, work_done = 0;
2293 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002294
Joe Perchesae9540f72010-02-09 11:49:52 +00002295 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2296 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002297
Ron Mercer39aa8162009-08-27 11:02:11 +00002298 /* Service the TX rings first. They start
2299 * right after the RSS rings. */
2300 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2301 trx_ring = &qdev->rx_ring[i];
2302 /* If this TX completion ring belongs to this vector and
2303 * it's not empty then service it.
2304 */
2305 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2306 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2307 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002308 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309 "%s: Servicing TX completion ring %d.\n",
2310 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002311 ql_clean_outbound_rx_ring(trx_ring);
2312 }
2313 }
2314
2315 /*
2316 * Now service the RSS ring if it's active.
2317 */
2318 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2319 rx_ring->cnsmr_idx) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002320 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2321 "%s: Servicing RX completion ring %d.\n",
2322 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002323 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2324 }
2325
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002326 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002327 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002328 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2329 }
2330 return work_done;
2331}
2332
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002333static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002334{
2335 struct ql_adapter *qdev = netdev_priv(ndev);
2336
Patrick McHardyf6469682013-04-19 02:04:27 +00002337 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002338 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
Jiri Pirko18c49b92011-07-21 03:24:11 +00002339 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002340 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002341 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2342 }
2343}
2344
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002345/**
2346 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2347 * based on the features to enable/disable hardware vlan accel
2348 */
2349static int qlge_update_hw_vlan_features(struct net_device *ndev,
2350 netdev_features_t features)
2351{
2352 struct ql_adapter *qdev = netdev_priv(ndev);
2353 int status = 0;
2354
2355 status = ql_adapter_down(qdev);
2356 if (status) {
2357 netif_err(qdev, link, qdev->ndev,
2358 "Failed to bring down the adapter\n");
2359 return status;
2360 }
2361
2362	/* Update the features with the recent change */
2363 ndev->features = features;
2364
2365 status = ql_adapter_up(qdev);
2366 if (status) {
2367 netif_err(qdev, link, qdev->ndev,
2368 "Failed to bring up the adapter\n");
2369 return status;
2370 }
2371 return status;
2372}
2373
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002374static netdev_features_t qlge_fix_features(struct net_device *ndev,
2375 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002376{
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002377 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002378
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04002379 /* Update the behavior of vlan accel in the adapter */
2380 err = qlge_update_hw_vlan_features(ndev, features);
2381 if (err)
2382 return err;
2383
Jiri Pirko18c49b92011-07-21 03:24:11 +00002384 return features;
2385}
2386
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002387static int qlge_set_features(struct net_device *ndev,
2388 netdev_features_t features)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002389{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002390 netdev_features_t changed = ndev->features ^ features;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002391
Patrick McHardyf6469682013-04-19 02:04:27 +00002392 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002393 qlge_vlan_mode(ndev, features);
2394
2395 return 0;
2396}
2397
Jiri Pirko8e586132011-12-08 19:52:37 -05002398static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002399{
2400 u32 enable_bit = MAC_ADDR_E;
Jiri Pirko8e586132011-12-08 19:52:37 -05002401 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002402
Jiri Pirko8e586132011-12-08 19:52:37 -05002403 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2404 MAC_ADDR_TYPE_VLAN, vid);
2405 if (err)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002406 netif_err(qdev, ifup, qdev->ndev,
2407 "Failed to init vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002408 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002409}
2410
Patrick McHardy80d5c362013-04-19 02:04:28 +00002411static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002412{
2413 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercercc288f52009-02-23 10:42:14 +00002414 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002415 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002416
Ron Mercercc288f52009-02-23 10:42:14 +00002417 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2418 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002419 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002420
Jiri Pirko8e586132011-12-08 19:52:37 -05002421 err = __qlge_vlan_rx_add_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002422 set_bit(vid, qdev->active_vlans);
2423
Ron Mercercc288f52009-02-23 10:42:14 +00002424 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002425
2426 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002427}
2428
Jiri Pirko8e586132011-12-08 19:52:37 -05002429static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002430{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002431 u32 enable_bit = 0;
Jiri Pirko8e586132011-12-08 19:52:37 -05002432 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002433
Jiri Pirko8e586132011-12-08 19:52:37 -05002434 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2435 MAC_ADDR_TYPE_VLAN, vid);
2436 if (err)
Joe Perchesae9540f72010-02-09 11:49:52 +00002437 netif_err(qdev, ifup, qdev->ndev,
2438 "Failed to clear vlan address.\n");
Jiri Pirko8e586132011-12-08 19:52:37 -05002439 return err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002440}
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002441
Patrick McHardy80d5c362013-04-19 02:04:28 +00002442static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
Jiri Pirko18c49b92011-07-21 03:24:11 +00002443{
2444 struct ql_adapter *qdev = netdev_priv(ndev);
2445 int status;
Jiri Pirko8e586132011-12-08 19:52:37 -05002446 int err;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002447
2448 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2449 if (status)
Jiri Pirko8e586132011-12-08 19:52:37 -05002450 return status;
Jiri Pirko18c49b92011-07-21 03:24:11 +00002451
Jiri Pirko8e586132011-12-08 19:52:37 -05002452 err = __qlge_vlan_rx_kill_vid(qdev, vid);
Jiri Pirko18c49b92011-07-21 03:24:11 +00002453 clear_bit(vid, qdev->active_vlans);
2454
2455 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Jiri Pirko8e586132011-12-08 19:52:37 -05002456
2457 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002458}
2459
Ron Mercerc1b60092010-10-27 04:58:12 +00002460static void qlge_restore_vlan(struct ql_adapter *qdev)
2461{
Jiri Pirko18c49b92011-07-21 03:24:11 +00002462 int status;
2463 u16 vid;
Ron Mercerc1b60092010-10-27 04:58:12 +00002464
Jiri Pirko18c49b92011-07-21 03:24:11 +00002465 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2466 if (status)
2467 return;
2468
2469 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2470 __qlge_vlan_rx_add_vid(qdev, vid);
2471
2472 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc1b60092010-10-27 04:58:12 +00002473}
2474
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002475/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2476static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2477{
2478 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002479 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002480 return IRQ_HANDLED;
2481}
2482
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002483/* This handles a fatal error, MPI activity, and the default
2484 * rx_ring in an MSI-X multiple vector environment.
2485 * In MSI/Legacy environment it also process the rest of
2486 * the rx_rings.
2487 */
2488static irqreturn_t qlge_isr(int irq, void *dev_id)
2489{
2490 struct rx_ring *rx_ring = dev_id;
2491 struct ql_adapter *qdev = rx_ring->qdev;
2492 struct intr_context *intr_context = &qdev->intr_context[0];
2493 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002494 int work_done = 0;
2495
Ron Mercerbb0d2152008-10-20 10:30:26 -07002496 spin_lock(&qdev->hw_lock);
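	/* A non-zero irq_cnt means completion interrupts for this
	 * context are currently disabled, so a shared/legacy interrupt
	 * here cannot be ours.
	 */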
2497 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002498 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2499 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002500 spin_unlock(&qdev->hw_lock);
2501 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002502 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002503 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002504
Ron Mercerbb0d2152008-10-20 10:30:26 -07002505 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002506
2507 /*
2508 * Check for fatal error.
2509 */
2510 if (var & STS_FE) {
2511 ql_queue_asic_error(qdev);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002512 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002513 var = ql_read32(qdev, ERR_STS);
Jitendra Kalsaria5069ee52011-06-30 10:02:06 +00002514 netdev_err(qdev->ndev, "Resetting chip. "
2515 "Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002516 return IRQ_HANDLED;
2517 }
2518
2519 /*
2520 * Check MPI processor activity.
2521 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002522 if ((var & STS_PI) &&
2523 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002524 /*
2525 * We've got an async event or mailbox completion.
2526 * Handle it and clear the source of the interrupt.
2527 */
Joe Perchesae9540f72010-02-09 11:49:52 +00002528 netif_err(qdev, intr, qdev->ndev,
2529 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002530 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002531 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2532 queue_delayed_work_on(smp_processor_id(),
2533 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002534 work_done++;
2535 }
2536
2537 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002538 * Get the bit-mask that shows the active queues for this
2539 * pass. Compare it to the queues that this irq services
2540 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002541 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002542 var = ql_read32(qdev, ISR1);
2543 if (var & intr_context->irq_mask) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002544 netif_info(qdev, intr, qdev->ndev,
2545 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002546 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002547 napi_schedule(&rx_ring->napi);
2548 work_done++;
2549 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002550 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002551 return work_done ? IRQ_HANDLED : IRQ_NONE;
2552}
2553
2554static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2555{
2556
2557 if (skb_is_gso(skb)) {
2558 int err;
françois romieubb9689e2014-03-29 12:26:27 +01002559
2560 err = skb_cow_head(skb, 0);
2561 if (err < 0)
2562 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002563
2564 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2565 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2566 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2567 mac_iocb_ptr->total_hdrs_len =
2568 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2569 mac_iocb_ptr->net_trans_offset =
2570 cpu_to_le16(skb_network_offset(skb) |
2571 skb_transport_offset(skb)
2572 << OB_MAC_TRANSPORT_HDR_SHIFT);
2573 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2574 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2575 if (likely(skb->protocol == htons(ETH_P_IP))) {
2576 struct iphdr *iph = ip_hdr(skb);
2577 iph->check = 0;
2578 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
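			/* Seed the TCP checksum with the pseudo-header
			 * sum; the hardware completes it per segment.
			 */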
2579 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2580 iph->daddr, 0,
2581 IPPROTO_TCP,
2582 0);
2583 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2584 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2585 tcp_hdr(skb)->check =
2586 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2587 &ipv6_hdr(skb)->daddr,
2588 0, IPPROTO_TCP, 0);
2589 }
2590 return 1;
2591 }
2592 return 0;
2593}
2594
2595static void ql_hw_csum_setup(struct sk_buff *skb,
2596 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2597{
2598 int len;
2599 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002600 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002601 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2602 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2603 mac_iocb_ptr->net_trans_offset =
2604 cpu_to_le16(skb_network_offset(skb) |
2605 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2606
2607 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
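	/* L4 length for the pseudo-header: IP total length minus the
	 * IP header length.
	 */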
2608 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2609 if (likely(iph->protocol == IPPROTO_TCP)) {
2610 check = &(tcp_hdr(skb)->check);
2611 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2612 mac_iocb_ptr->total_hdrs_len =
2613 cpu_to_le16(skb_transport_offset(skb) +
2614 (tcp_hdr(skb)->doff << 2));
2615 } else {
2616 check = &(udp_hdr(skb)->check);
2617 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2618 mac_iocb_ptr->total_hdrs_len =
2619 cpu_to_le16(skb_transport_offset(skb) +
2620 sizeof(struct udphdr));
2621 }
2622 *check = ~csum_tcpudp_magic(iph->saddr,
2623 iph->daddr, len, iph->protocol, 0);
2624}
2625
Stephen Hemminger613573252009-08-31 19:50:58 +00002626static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002627{
2628 struct tx_ring_desc *tx_ring_desc;
2629 struct ob_mac_iocb_req *mac_iocb_ptr;
2630 struct ql_adapter *qdev = netdev_priv(ndev);
2631 int tso;
2632 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002633 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002634
2635 tx_ring = &qdev->tx_ring[tx_ring_idx];
2636
Ron Mercer74c50b42009-03-09 10:59:27 +00002637 if (skb_padto(skb, ETH_ZLEN))
2638 return NETDEV_TX_OK;
2639
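	/* tx_count is the count of free IOCB slots; refuse the frame
	 * unless at least two remain.
	 */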
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002640 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002641 netif_info(qdev, tx_queued, qdev->ndev,
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002642 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00002643 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002644 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercer885ee392009-11-03 13:49:31 +00002645 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002646 return NETDEV_TX_BUSY;
2647 }
2648 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2649 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002650 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002651
2652 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2653 mac_iocb_ptr->tid = tx_ring_desc->index;
2654	/* Record the tx queue for this IO in the IOCB so the
2655	 * completion handler can establish the context.
2656	 */
2657 mac_iocb_ptr->txq_idx = tx_ring_idx;
2658 tx_ring_desc->skb = skb;
2659
2660 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2661
Jesse Grosseab6d182010-10-20 13:56:03 +00002662 if (vlan_tx_tag_present(skb)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002663 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2664 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002665 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2666 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2667 }
2668 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2669 if (tso < 0) {
2670 dev_kfree_skb_any(skb);
2671 return NETDEV_TX_OK;
2672 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2673 ql_hw_csum_setup(skb,
2674 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2675 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002676 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2677 NETDEV_TX_OK) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002678 netif_err(qdev, tx_queued, qdev->ndev,
2679 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002680 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002681 return NETDEV_TX_BUSY;
2682 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002683 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2684 tx_ring->prod_idx++;
2685 if (tx_ring->prod_idx == tx_ring->wq_len)
2686 tx_ring->prod_idx = 0;
2687 wmb();
2688
2689 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f72010-02-09 11:49:52 +00002690 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2691 "tx queued, slot %d, len %d\n",
2692 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002693
2694 atomic_dec(&tx_ring->tx_count);
Jitendra Kalsaria41812db2012-07-10 14:57:31 +00002695
2696 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2697 netif_stop_subqueue(ndev, tx_ring->wq_id);
2698 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2699 /*
2700 * The queue got stopped because the tx_ring was full.
2701 * Wake it up, because it's now at least 25% empty.
2702 */
2703 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2704 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002705 return NETDEV_TX_OK;
2706}
2707
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002708
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002709static void ql_free_shadow_space(struct ql_adapter *qdev)
2710{
2711 if (qdev->rx_ring_shadow_reg_area) {
2712 pci_free_consistent(qdev->pdev,
2713 PAGE_SIZE,
2714 qdev->rx_ring_shadow_reg_area,
2715 qdev->rx_ring_shadow_reg_dma);
2716 qdev->rx_ring_shadow_reg_area = NULL;
2717 }
2718 if (qdev->tx_ring_shadow_reg_area) {
2719 pci_free_consistent(qdev->pdev,
2720 PAGE_SIZE,
2721 qdev->tx_ring_shadow_reg_area,
2722 qdev->tx_ring_shadow_reg_dma);
2723 qdev->tx_ring_shadow_reg_area = NULL;
2724 }
2725}
2726
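/* The shadow register areas are single pages of coherent DMA memory
 * into which the chip writes shared ring state, e.g. the completion
 * queue producer indices read via ql_read_sh_reg().
 */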
2727static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2728{
2729 qdev->rx_ring_shadow_reg_area =
Joe Perches440c7342014-08-08 14:24:34 -07002730 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2731 &qdev->rx_ring_shadow_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002732 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002733 netif_err(qdev, ifup, qdev->ndev,
2734 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002735 return -ENOMEM;
2736 }
Joe Perches440c7342014-08-08 14:24:34 -07002737
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002738 qdev->tx_ring_shadow_reg_area =
Joe Perches440c7342014-08-08 14:24:34 -07002739 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2740 &qdev->tx_ring_shadow_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002741 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002742 netif_err(qdev, ifup, qdev->ndev,
2743 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002744 goto err_wqp_sh_area;
2745 }
2746 return 0;
2747
2748err_wqp_sh_area:
2749 pci_free_consistent(qdev->pdev,
2750 PAGE_SIZE,
2751 qdev->rx_ring_shadow_reg_area,
2752 qdev->rx_ring_shadow_reg_dma);
2753 return -ENOMEM;
2754}
2755
2756static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2757{
2758 struct tx_ring_desc *tx_ring_desc;
2759 int i;
2760 struct ob_mac_iocb_req *mac_iocb_ptr;
2761
2762 mac_iocb_ptr = tx_ring->wq_base;
2763 tx_ring_desc = tx_ring->q;
2764 for (i = 0; i < tx_ring->wq_len; i++) {
2765 tx_ring_desc->index = i;
2766 tx_ring_desc->skb = NULL;
2767 tx_ring_desc->queue_entry = mac_iocb_ptr;
2768 mac_iocb_ptr++;
2769 tx_ring_desc++;
2770 }
2771 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002772}
2773
2774static void ql_free_tx_resources(struct ql_adapter *qdev,
2775 struct tx_ring *tx_ring)
2776{
2777 if (tx_ring->wq_base) {
2778 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2779 tx_ring->wq_base, tx_ring->wq_base_dma);
2780 tx_ring->wq_base = NULL;
2781 }
2782 kfree(tx_ring->q);
2783 tx_ring->q = NULL;
2784}
2785
2786static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2787 struct tx_ring *tx_ring)
2788{
2789 tx_ring->wq_base =
2790 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2791 &tx_ring->wq_base_dma);
2792
Joe Perches8e95a202009-12-03 07:58:21 +00002793 if ((tx_ring->wq_base == NULL) ||
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002794 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2795 goto pci_alloc_err;
2796
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002797 tx_ring->q =
2798 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2799 if (tx_ring->q == NULL)
2800 goto err;
2801
2802 return 0;
2803err:
2804 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2805 tx_ring->wq_base, tx_ring->wq_base_dma);
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002806 tx_ring->wq_base = NULL;
2807pci_alloc_err:
2808 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002809 return -ENOMEM;
2810}
2811
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002812static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002813{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002814 struct bq_desc *lbq_desc;
2815
Ron Mercer7c734352009-10-19 03:32:19 +00002816 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002817
Ron Mercer7c734352009-10-19 03:32:19 +00002818 curr_idx = rx_ring->lbq_curr_idx;
2819 clean_idx = rx_ring->lbq_clean_idx;
2820 while (curr_idx != clean_idx) {
2821 lbq_desc = &rx_ring->lbq[curr_idx];
2822
2823 if (lbq_desc->p.pg_chunk.last_flag) {
2824 pci_unmap_page(qdev->pdev,
2825 lbq_desc->p.pg_chunk.map,
2826 ql_lbq_block_size(qdev),
2827 PCI_DMA_FROMDEVICE);
2828 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002829 }
Ron Mercer7c734352009-10-19 03:32:19 +00002830
2831 put_page(lbq_desc->p.pg_chunk.page);
2832 lbq_desc->p.pg_chunk.page = NULL;
2833
2834 if (++curr_idx == rx_ring->lbq_len)
2835 curr_idx = 0;
2836
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002837 }
Thadeu Lima de Souza Cascardoef380792013-05-11 09:15:37 +00002838 if (rx_ring->pg_chunk.page) {
2839 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2840 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2841 put_page(rx_ring->pg_chunk.page);
2842 rx_ring->pg_chunk.page = NULL;
2843 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002844}
2845
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002846static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002847{
2848 int i;
2849 struct bq_desc *sbq_desc;
2850
2851 for (i = 0; i < rx_ring->sbq_len; i++) {
2852 sbq_desc = &rx_ring->sbq[i];
2853 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002854 netif_err(qdev, ifup, qdev->ndev,
2855 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002856 return;
2857 }
2858 if (sbq_desc->p.skb) {
2859 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002860 dma_unmap_addr(sbq_desc, mapaddr),
2861 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002862 PCI_DMA_FROMDEVICE);
2863 dev_kfree_skb(sbq_desc->p.skb);
2864 sbq_desc->p.skb = NULL;
2865 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002866 }
2867}
2868
Ron Mercer4545a3f2009-02-23 10:42:17 +00002869/* Free all large and small rx buffers associated
2870 * with the completion queues for this device.
2871 */
2872static void ql_free_rx_buffers(struct ql_adapter *qdev)
2873{
2874 int i;
2875 struct rx_ring *rx_ring;
2876
2877 for (i = 0; i < qdev->rx_ring_count; i++) {
2878 rx_ring = &qdev->rx_ring[i];
2879 if (rx_ring->lbq)
2880 ql_free_lbq_buffers(qdev, rx_ring);
2881 if (rx_ring->sbq)
2882 ql_free_sbq_buffers(qdev, rx_ring);
2883 }
2884}
2885
2886static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2887{
2888 struct rx_ring *rx_ring;
2889 int i;
2890
2891 for (i = 0; i < qdev->rx_ring_count; i++) {
2892 rx_ring = &qdev->rx_ring[i];
2893 if (rx_ring->type != TX_Q)
2894 ql_update_buffer_queues(qdev, rx_ring);
2895 }
2896}
2897
2898static void ql_init_lbq_ring(struct ql_adapter *qdev,
2899 struct rx_ring *rx_ring)
2900{
2901 int i;
2902 struct bq_desc *lbq_desc;
2903 __le64 *bq = rx_ring->lbq_base;
2904
2905 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2906 for (i = 0; i < rx_ring->lbq_len; i++) {
2907 lbq_desc = &rx_ring->lbq[i];
2908 memset(lbq_desc, 0, sizeof(*lbq_desc));
2909 lbq_desc->index = i;
2910 lbq_desc->addr = bq;
2911 bq++;
2912 }
2913}
2914
2915static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002916 struct rx_ring *rx_ring)
2917{
2918 int i;
2919 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002920 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002921
Ron Mercer4545a3f2009-02-23 10:42:17 +00002922 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002923 for (i = 0; i < rx_ring->sbq_len; i++) {
2924 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002925 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002926 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002927 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002928 bq++;
2929 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002930}
2931
2932static void ql_free_rx_resources(struct ql_adapter *qdev,
2933 struct rx_ring *rx_ring)
2934{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002935 /* Free the small buffer queue. */
2936 if (rx_ring->sbq_base) {
2937 pci_free_consistent(qdev->pdev,
2938 rx_ring->sbq_size,
2939 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2940 rx_ring->sbq_base = NULL;
2941 }
2942
2943 /* Free the small buffer queue control blocks. */
2944 kfree(rx_ring->sbq);
2945 rx_ring->sbq = NULL;
2946
2947 /* Free the large buffer queue. */
2948 if (rx_ring->lbq_base) {
2949 pci_free_consistent(qdev->pdev,
2950 rx_ring->lbq_size,
2951 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2952 rx_ring->lbq_base = NULL;
2953 }
2954
2955 /* Free the large buffer queue control blocks. */
2956 kfree(rx_ring->lbq);
2957 rx_ring->lbq = NULL;
2958
2959 /* Free the rx queue. */
2960 if (rx_ring->cq_base) {
2961 pci_free_consistent(qdev->pdev,
2962 rx_ring->cq_size,
2963 rx_ring->cq_base, rx_ring->cq_base_dma);
2964 rx_ring->cq_base = NULL;
2965 }
2966}
2967
 2968/* Allocate queues and buffers for this completion queue based
2969 * on the values in the parameter structure. */
2970static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2971 struct rx_ring *rx_ring)
2972{
2973
2974 /*
2975 * Allocate the completion queue for this rx_ring.
2976 */
2977 rx_ring->cq_base =
2978 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2979 &rx_ring->cq_base_dma);
2980
2981 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002982 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002983 return -ENOMEM;
2984 }
2985
2986 if (rx_ring->sbq_len) {
2987 /*
2988 * Allocate small buffer queue.
2989 */
2990 rx_ring->sbq_base =
2991 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2992 &rx_ring->sbq_base_dma);
2993
2994 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002995 netif_err(qdev, ifup, qdev->ndev,
2996 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002997 goto err_mem;
2998 }
2999
3000 /*
3001 * Allocate small buffer queue control blocks.
3002 */
Joe Perches14f8dc42013-02-07 11:46:27 +00003003 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3004 sizeof(struct bq_desc),
3005 GFP_KERNEL);
3006 if (rx_ring->sbq == NULL)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003007 goto err_mem;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003008
Ron Mercer4545a3f2009-02-23 10:42:17 +00003009 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003010 }
3011
3012 if (rx_ring->lbq_len) {
3013 /*
3014 * Allocate large buffer queue.
3015 */
3016 rx_ring->lbq_base =
3017 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3018 &rx_ring->lbq_base_dma);
3019
3020 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003021 netif_err(qdev, ifup, qdev->ndev,
3022 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003023 goto err_mem;
3024 }
3025 /*
3026 * Allocate large buffer queue control blocks.
3027 */
Joe Perches14f8dc42013-02-07 11:46:27 +00003028 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3029 sizeof(struct bq_desc),
3030 GFP_KERNEL);
3031 if (rx_ring->lbq == NULL)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003032 goto err_mem;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003033
Ron Mercer4545a3f2009-02-23 10:42:17 +00003034 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003035 }
3036
3037 return 0;
3038
3039err_mem:
3040 ql_free_rx_resources(qdev, rx_ring);
3041 return -ENOMEM;
3042}
3043
3044static void ql_tx_ring_clean(struct ql_adapter *qdev)
3045{
3046 struct tx_ring *tx_ring;
3047 struct tx_ring_desc *tx_ring_desc;
3048 int i, j;
3049
3050 /*
3051 * Loop through all queues and free
3052 * any resources.
3053 */
3054 for (j = 0; j < qdev->tx_ring_count; j++) {
3055 tx_ring = &qdev->tx_ring[j];
3056 for (i = 0; i < tx_ring->wq_len; i++) {
3057 tx_ring_desc = &tx_ring->q[i];
3058 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003059 netif_err(qdev, ifdown, qdev->ndev,
3060 "Freeing lost SKB %p, from queue %d, index %d.\n",
3061 tx_ring_desc->skb, j,
3062 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003063 ql_unmap_send(qdev, tx_ring_desc,
3064 tx_ring_desc->map_cnt);
3065 dev_kfree_skb(tx_ring_desc->skb);
3066 tx_ring_desc->skb = NULL;
3067 }
3068 }
3069 }
3070}
3071
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003072static void ql_free_mem_resources(struct ql_adapter *qdev)
3073{
3074 int i;
3075
3076 for (i = 0; i < qdev->tx_ring_count; i++)
3077 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3078 for (i = 0; i < qdev->rx_ring_count; i++)
3079 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3080 ql_free_shadow_space(qdev);
3081}
3082
3083static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3084{
3085 int i;
3086
3087 /* Allocate space for our shadow registers and such. */
3088 if (ql_alloc_shadow_space(qdev))
3089 return -ENOMEM;
3090
3091 for (i = 0; i < qdev->rx_ring_count; i++) {
3092 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003093 netif_err(qdev, ifup, qdev->ndev,
3094 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003095 goto err_mem;
3096 }
3097 }
3098 /* Allocate tx queue resources */
3099 for (i = 0; i < qdev->tx_ring_count; i++) {
3100 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003101 netif_err(qdev, ifup, qdev->ndev,
3102 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003103 goto err_mem;
3104 }
3105 }
3106 return 0;
3107
3108err_mem:
3109 ql_free_mem_resources(qdev);
3110 return -ENOMEM;
3111}
3112
3113/* Set up the rx ring control block and pass it to the chip.
3114 * The control block is defined as
3115 * "Completion Queue Initialization Control Block", or cqicb.
3116 */
3117static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3118{
3119 struct cqicb *cqicb = &rx_ring->cqicb;
3120 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003121 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003122 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003123 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003124 void __iomem *doorbell_area =
3125 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3126 int err = 0;
3127 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003128 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003129 __le64 *base_indirect_ptr;
3130 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003131
3132 /* Set up the shadow registers for this ring. */
3133 rx_ring->prod_idx_sh_reg = shadow_reg;
3134 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003135 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003136 shadow_reg += sizeof(u64);
3137 shadow_reg_dma += sizeof(u64);
3138 rx_ring->lbq_base_indirect = shadow_reg;
3139 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003140 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3141 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142 rx_ring->sbq_base_indirect = shadow_reg;
3143 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3144
3145 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003146 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003147 rx_ring->cnsmr_idx = 0;
3148 rx_ring->curr_entry = rx_ring->cq_base;
3149
3150 /* PCI doorbell mem area + 0x04 for valid register */
3151 rx_ring->valid_db_reg = doorbell_area + 0x04;
3152
3153 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003154 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003155
3156 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003157 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003158
3159 memset((void *)cqicb, 0, sizeof(struct cqicb));
3160 cqicb->msix_vect = rx_ring->irq;
3161
Ron Mercer459caf52009-01-04 17:08:11 -08003162 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3163 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003164
Ron Mercer97345522009-01-09 11:31:50 +00003165 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003166
Ron Mercer97345522009-01-09 11:31:50 +00003167 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003168
3169 /*
3170 * Set up the control block load flags.
3171 */
3172 cqicb->flags = FLAGS_LC | /* Load queue base address */
3173 FLAGS_LV | /* Load MSI-X vector */
3174 FLAGS_LI; /* Load irq delay values */
3175 if (rx_ring->lbq_len) {
3176 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003177 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003178 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003179 page_entries = 0;
3180 do {
3181 *base_indirect_ptr = cpu_to_le64(tmp);
3182 tmp += DB_PAGE_SIZE;
3183 base_indirect_ptr++;
3184 page_entries++;
3185 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003186 cqicb->lbq_addr =
3187 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003188 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3189 (u16) rx_ring->lbq_buf_size;
3190 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3191 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3192 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003193 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003194 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003195 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003196 rx_ring->lbq_clean_idx = 0;
3197 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198 }
3199 if (rx_ring->sbq_len) {
3200 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003201 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003202 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003203 page_entries = 0;
3204 do {
3205 *base_indirect_ptr = cpu_to_le64(tmp);
3206 tmp += DB_PAGE_SIZE;
3207 base_indirect_ptr++;
3208 page_entries++;
3209 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003210 cqicb->sbq_addr =
3211 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003212 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003213 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003214 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3215 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003216 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003217 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003218 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003219 rx_ring->sbq_clean_idx = 0;
3220 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003221 }
3222 switch (rx_ring->type) {
3223 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003224 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3225 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3226 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003227 case RX_Q:
3228 /* Inbound completion handling rx_rings run in
3229 * separate NAPI contexts.
3230 */
3231 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3232 64);
3233 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3234 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3235 break;
3236 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003237 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3238 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003239 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003240 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3241 CFG_LCQ, rx_ring->cq_id);
3242 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003243 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003244 return err;
3245 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003246 return err;
3247}
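/* Illustrative sketch, not part of the driver: the address arithmetic
 * used above. TX rings take the first doorbell pages (ql_start_tx_ring()
 * indexes by wq_id) and completion queues start 128 pages in, while the
 * shadow producer-index area is a flat array indexed by cq_id. The
 * helper name is hypothetical.
 */
static inline unsigned long cq_doorbell_offset(unsigned long db_page_size,
                                               unsigned int cq_id)
{
        return db_page_size * (128 + cq_id);
}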
3248
3249static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3250{
3251 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3252 void __iomem *doorbell_area =
3253 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3254 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3255 (tx_ring->wq_id * sizeof(u64));
3256 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3257 (tx_ring->wq_id * sizeof(u64));
3258 int err = 0;
3259
3260 /*
3261 * Assign doorbell registers for this tx_ring.
3262 */
3263 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003264 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003265 tx_ring->prod_idx = 0;
3266 /* TX PCI doorbell mem area + 0x04 */
3267 tx_ring->valid_db_reg = doorbell_area + 0x04;
3268
3269 /*
3270 * Assign shadow registers for this tx_ring.
3271 */
3272 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3273 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3274
3275 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3276 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3277 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3278 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3279 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003280 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003281
Ron Mercer97345522009-01-09 11:31:50 +00003282 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003283
3284 ql_init_tx_ring(qdev, tx_ring);
3285
Ron Mercere3324712009-07-02 06:06:13 +00003286 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003287 (u16) tx_ring->wq_id);
3288 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003289 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003290 return err;
3291 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003292 return err;
3293}
3294
3295static void ql_disable_msix(struct ql_adapter *qdev)
3296{
3297 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3298 pci_disable_msix(qdev->pdev);
3299 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3300 kfree(qdev->msi_x_entry);
3301 qdev->msi_x_entry = NULL;
3302 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3303 pci_disable_msi(qdev->pdev);
3304 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3305 }
3306}
3307
Ron Mercera4ab6132009-08-27 11:02:10 +00003308/* We start by trying to get the number of vectors
3309 * stored in qdev->intr_count. If we don't get that
3310 * many then we reduce the count and try again.
3311 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003312static void ql_enable_msix(struct ql_adapter *qdev)
3313{
Ron Mercera4ab6132009-08-27 11:02:10 +00003314 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003315
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003316 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003317 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003318 /* Try to alloc space for the msix struct,
3319 * if it fails then go to MSI/legacy.
3320 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003321 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003322 sizeof(struct msix_entry),
3323 GFP_KERNEL);
3324 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003325 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003326 goto msi;
3327 }
3328
Ron Mercera4ab6132009-08-27 11:02:10 +00003329 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003330 qdev->msi_x_entry[i].entry = i;
3331
Alexander Gordeev50b483a2014-02-18 11:11:59 +01003332 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3333 1, qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003334 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003335 kfree(qdev->msi_x_entry);
3336 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003337 netif_warn(qdev, ifup, qdev->ndev,
3338 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera5a62a12009-11-11 12:54:05 +00003339 qlge_irq_type = MSI_IRQ;
Alexander Gordeev50b483a2014-02-18 11:11:59 +01003340 } else {
3341 qdev->intr_count = err;
Ron Mercera4ab6132009-08-27 11:02:10 +00003342 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003343 netif_info(qdev, ifup, qdev->ndev,
3344 "MSI-X Enabled, got %d vectors.\n",
3345 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003346 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003347 }
3348 }
3349msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003350 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003351 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003352 if (!pci_enable_msi(qdev->pdev)) {
3353 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003354 netif_info(qdev, ifup, qdev->ndev,
3355 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003356 return;
3357 }
3358 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003359 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003360 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3361 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003362}
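/* Illustrative sketch, not part of the driver: the fallback ladder walked
 * above. pci_enable_msix_range(pdev, entries, 1, n) enables between one
 * and n vectors and returns the granted count (or a negative errno), so a
 * partial grant simply shrinks qdev->intr_count. The enum and helper
 * below are hypothetical.
 */
enum qlge_irq_mode { QLGE_IRQ_MSIX, QLGE_IRQ_MSI, QLGE_IRQ_LEGACY };

static enum qlge_irq_mode qlge_pick_irq_mode(int msix_vecs, int msi_ok)
{
        if (msix_vecs > 0)
                return QLGE_IRQ_MSIX;   /* one vector per RSS ring */
        if (msi_ok)
                return QLGE_IRQ_MSI;    /* single, unshared vector */
        return QLGE_IRQ_LEGACY;         /* shared INTx line */
}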
3363
Ron Mercer39aa8162009-08-27 11:02:11 +00003364/* Each vector services 1 RSS ring and 1 or more
3365 * TX completion rings. This function loops through
3366 * the TX completion rings and assigns the vector that
3367 * will service it. An example would be if there are
3368 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3369 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003370 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003371 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3372 */
3373static void ql_set_tx_vect(struct ql_adapter *qdev)
3374{
3375 int i, j, vect;
3376 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3377
3378 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3379 /* Assign irq vectors to TX rx_rings.*/
3380 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3381 i < qdev->rx_ring_count; i++) {
3382 if (j == tx_rings_per_vector) {
3383 vect++;
3384 j = 0;
3385 }
3386 qdev->rx_ring[i].irq = vect;
3387 j++;
3388 }
3389 } else {
3390 /* For single vector all rings have an irq
3391 * of zero.
3392 */
3393 for (i = 0; i < qdev->rx_ring_count; i++)
3394 qdev->rx_ring[i].irq = 0;
3395 }
3396}
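/* Illustrative sketch, not part of the driver: the assignment above as a
 * pure function over the example in the comment. With 2 vectors and 8 TX
 * completion rings, tx_rings_per_vector is 4, so relative TX ring indices
 * 0-3 map to vector 0 and 4-7 to vector 1. Names are hypothetical.
 */
static inline int qlge_tx_ring_vector(int rel_tx_idx, int tx_ring_count,
                                      int vector_count)
{
        return rel_tx_idx / (tx_ring_count / vector_count);
}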
3397
3398/* Set the interrupt mask for this vector. Each vector
3399 * will service 1 RSS ring and 1 or more TX completion
3400 * rings. This function sets up a bit mask per vector
3401 * that indicates which rings it services.
3402 */
3403static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3404{
3405 int j, vect = ctx->intr;
3406 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3407
3408 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3409 /* Add the RSS ring serviced by this vector
3410 * to the mask.
3411 */
3412 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3413 /* Add the TX ring(s) serviced by this vector
3414 * to the mask. */
3415 for (j = 0; j < tx_rings_per_vector; j++) {
3416 ctx->irq_mask |=
3417 (1 << qdev->rx_ring[qdev->rss_ring_count +
3418 (vect * tx_rings_per_vector) + j].cq_id);
3419 }
3420 } else {
3421 /* For single vector we just shift each queue's
3422 * ID into the mask.
3423 */
3424 for (j = 0; j < qdev->rx_ring_count; j++)
3425 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3426 }
3427}
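/* Illustrative sketch, not part of the driver: the mask built above,
 * assuming cq_id equals the ring index as set up in ql_configure_rings().
 * For 2 vectors and 8 TX completion rings, vector 1 gets RSS cq_id 1 plus
 * TX cq_ids 6-9: mask = (1 << 1) | (1 << 6) | ... | (1 << 9).
 */
static unsigned int qlge_vector_mask(int vect, int rss_count, int tx_per_vect)
{
        unsigned int mask = 1u << vect;         /* this vector's RSS ring */
        int j;

        for (j = 0; j < tx_per_vect; j++)
                mask |= 1u << (rss_count + vect * tx_per_vect + j);
        return mask;
}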
3428
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003429/*
3430 * Here we build the intr_context structures based on
3431 * our rx_ring count and intr vector count.
3432 * The intr_context structure is used to hook each vector
3433 * to possibly different handlers.
3434 */
3435static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3436{
3437 int i = 0;
3438 struct intr_context *intr_context = &qdev->intr_context[0];
3439
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003440 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 3441 /* Each rx_ring has its
3442 * own intr_context since we have separate
3443 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003444 */
3445 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3446 qdev->rx_ring[i].irq = i;
3447 intr_context->intr = i;
3448 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003449 /* Set up this vector's bit-mask that indicates
3450 * which queues it services.
3451 */
3452 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003453 /*
 3454 * We set up each vector's enable/disable/read bits so
3455 * there's no bit/mask calculations in the critical path.
3456 */
3457 intr_context->intr_en_mask =
3458 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3459 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3460 | i;
3461 intr_context->intr_dis_mask =
3462 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3463 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3464 INTR_EN_IHD | i;
3465 intr_context->intr_read_mask =
3466 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3467 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3468 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003469 if (i == 0) {
3470 /* The first vector/queue handles
3471 * broadcast/multicast, fatal errors,
3472 * and firmware events. This in addition
3473 * to normal inbound NAPI processing.
3474 */
3475 intr_context->handler = qlge_isr;
3476 sprintf(intr_context->name, "%s-rx-%d",
3477 qdev->ndev->name, i);
3478 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003479 /*
3480 * Inbound queues handle unicast frames only.
3481 */
3482 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003483 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003484 qdev->ndev->name, i);
3485 }
3486 }
3487 } else {
3488 /*
3489 * All rx_rings use the same intr_context since
3490 * there is only one vector.
3491 */
3492 intr_context->intr = 0;
3493 intr_context->qdev = qdev;
3494 /*
 3495 * We set up each vector's enable/disable/read bits so
3496 * there's no bit/mask calculations in the critical path.
3497 */
3498 intr_context->intr_en_mask =
3499 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3500 intr_context->intr_dis_mask =
3501 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3502 INTR_EN_TYPE_DISABLE;
3503 intr_context->intr_read_mask =
3504 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3505 /*
3506 * Single interrupt means one handler for all rings.
3507 */
3508 intr_context->handler = qlge_isr;
3509 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003510 /* Set up this vector's bit-mask that indicates
3511 * which queues it services. In this case there is
3512 * a single vector so it will service all RSS and
3513 * TX completion rings.
3514 */
3515 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003516 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003517 /* Tell the TX completion rings which MSI-X vector
3518 * they will be using.
3519 */
3520 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003521}
3522
3523static void ql_free_irq(struct ql_adapter *qdev)
3524{
3525 int i;
3526 struct intr_context *intr_context = &qdev->intr_context[0];
3527
3528 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3529 if (intr_context->hooked) {
3530 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3531 free_irq(qdev->msi_x_entry[i].vector,
3532 &qdev->rx_ring[i]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003533 } else {
3534 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003535 }
3536 }
3537 }
3538 ql_disable_msix(qdev);
3539}
3540
3541static int ql_request_irq(struct ql_adapter *qdev)
3542{
3543 int i;
3544 int status = 0;
3545 struct pci_dev *pdev = qdev->pdev;
3546 struct intr_context *intr_context = &qdev->intr_context[0];
3547
3548 ql_resolve_queues_to_irqs(qdev);
3549
3550 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3551 atomic_set(&intr_context->irq_cnt, 0);
3552 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3553 status = request_irq(qdev->msi_x_entry[i].vector,
3554 intr_context->handler,
3555 0,
3556 intr_context->name,
3557 &qdev->rx_ring[i]);
3558 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003559 netif_err(qdev, ifup, qdev->ndev,
3560 "Failed request for MSIX interrupt %d.\n",
3561 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003562 goto err_irq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 }
3564 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003565 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3566 "trying msi or legacy interrupts.\n");
3567 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3568 "%s: irq = %d.\n", __func__, pdev->irq);
3569 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3570 "%s: context->name = %s.\n", __func__,
3571 intr_context->name);
3572 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3573 "%s: dev_id = 0x%p.\n", __func__,
3574 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003575 status =
3576 request_irq(pdev->irq, qlge_isr,
3577 test_bit(QL_MSI_ENABLED,
 3578 &qdev->flags) ? 0 : IRQF_SHARED,
3580 intr_context->name, &qdev->rx_ring[0]);
3581 if (status)
3582 goto err_irq;
3583
Joe Perchesae9540f72010-02-09 11:49:52 +00003584 netif_err(qdev, ifup, qdev->ndev,
3585 "Hooked intr %d, queue type %s, with name %s.\n",
3586 i,
3587 qdev->rx_ring[0].type == DEFAULT_Q ?
3588 "DEFAULT_Q" :
3589 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3590 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3591 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003592 }
3593 intr_context->hooked = 1;
3594 }
3595 return status;
3596err_irq:
Joe Perchesa42c3a22014-04-24 18:50:59 -07003597 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003598 ql_free_irq(qdev);
3599 return status;
3600}
3601
3602static int ql_start_rss(struct ql_adapter *qdev)
3603{
Joe Perches215faf92010-12-21 02:16:10 -08003604 static const u8 init_hash_seed[] = {
3605 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3606 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3607 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3608 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3609 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3610 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003611 struct ricb *ricb = &qdev->ricb;
3612 int status = 0;
3613 int i;
3614 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3615
Ron Mercere3324712009-07-02 06:06:13 +00003616 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003617
Ron Mercerb2014ff2009-08-27 11:02:09 +00003618 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003619 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003620 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3621 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003622
3623 /*
3624 * Fill out the Indirection Table.
3625 */
Ron Mercer541ae282009-10-08 09:54:37 +00003626 for (i = 0; i < 1024; i++)
3627 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003628
Ron Mercer541ae282009-10-08 09:54:37 +00003629 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3630 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003631
Ron Mercere3324712009-07-02 06:06:13 +00003632 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003633 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003634 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003635 return status;
3636 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003637 return status;
3638}
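/* Illustrative sketch, not part of the driver: the indirection-table fill
 * above. Masking with (ring_count - 1) spreads the 1024 entries
 * round-robin over the RSS rings and relies on ring_count being a power
 * of two, e.g. 4 rings give the pattern 0,1,2,3,0,1,2,3,...
 */
static void qlge_fill_rss_table(u8 *table, int entries, int ring_count)
{
        int i;

        for (i = 0; i < entries; i++)
                table[i] = i & (ring_count - 1);
}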
3639
Ron Mercera5f59dc2009-07-02 06:06:07 +00003640static int ql_clear_routing_entries(struct ql_adapter *qdev)
3641{
3642 int i, status = 0;
3643
3644 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3645 if (status)
3646 return status;
3647 /* Clear all the entries in the routing table. */
3648 for (i = 0; i < 16; i++) {
3649 status = ql_set_routing_reg(qdev, i, 0, 0);
3650 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003651 netif_err(qdev, ifup, qdev->ndev,
3652 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003653 break;
3654 }
3655 }
3656 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3657 return status;
3658}
3659
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003660/* Initialize the frame-to-queue routing. */
3661static int ql_route_initialize(struct ql_adapter *qdev)
3662{
3663 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003664
3665 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003666 status = ql_clear_routing_entries(qdev);
3667 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003668 return status;
3669
3670 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3671 if (status)
3672 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003673
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003674 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3675 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003676 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003677 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003678 "Failed to init routing register "
3679 "for IP CSUM error packets.\n");
3680 goto exit;
3681 }
3682 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3683 RT_IDX_TU_CSUM_ERR, 1);
3684 if (status) {
3685 netif_err(qdev, ifup, qdev->ndev,
3686 "Failed to init routing register "
3687 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003688 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003689 }
3690 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3691 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003692 netif_err(qdev, ifup, qdev->ndev,
3693 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003694 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003695 }
3696 /* If we have more than one inbound queue, then turn on RSS in the
3697 * routing block.
3698 */
3699 if (qdev->rss_ring_count > 1) {
3700 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3701 RT_IDX_RSS_MATCH, 1);
3702 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003703 netif_err(qdev, ifup, qdev->ndev,
3704 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003705 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003706 }
3707 }
3708
3709 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3710 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003711 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003712 netif_err(qdev, ifup, qdev->ndev,
3713 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003714exit:
3715 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003716 return status;
3717}
3718
Ron Mercer2ee1e272009-03-03 12:10:33 +00003719int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003720{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003721 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003722
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003723 /* Check if the link is up and use that to
 3724 * determine whether we are setting or clearing
3725 * the MAC address in the CAM.
3726 */
3727 set = ql_read32(qdev, STS);
3728 set &= qdev->port_link_up;
3729 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003730 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003731 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003732 return status;
3733 }
3734
3735 status = ql_route_initialize(qdev);
3736 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003737 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003738
3739 return status;
3740}
3741
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003742static int ql_adapter_initialize(struct ql_adapter *qdev)
3743{
3744 u32 value, mask;
3745 int i;
3746 int status = 0;
3747
3748 /*
3749 * Set up the System register to halt on errors.
3750 */
3751 value = SYS_EFE | SYS_FAE;
3752 mask = value << 16;
3753 ql_write32(qdev, SYS, mask | value);
3754
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003755 /* Set the default queue and VLAN behavior. */
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04003756 value = NIC_RCV_CFG_DFQ;
3757 mask = NIC_RCV_CFG_DFQ_MASK;
3758 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3759 value |= NIC_RCV_CFG_RV;
3760 mask |= (NIC_RCV_CFG_RV << 16);
3761 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003762 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3763
3764 /* Set the MPI interrupt to enabled. */
3765 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3766
3767 /* Enable the function, set pagesize, enable error checking. */
3768 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003769 FSC_EC | FSC_VM_PAGE_4K;
3770 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003771
3772 /* Set/clear header splitting. */
3773 mask = FSC_VM_PAGESIZE_MASK |
3774 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3775 ql_write32(qdev, FSC, mask | value);
3776
Ron Mercer572c5262010-01-02 10:37:42 +00003777 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003778
Ron Mercera3b71932009-10-08 09:54:38 +00003779 /* Set RX packet routing to use port/pci function on which the
3780 * packet arrived on in addition to usual frame routing.
3781 * This is helpful on bonding where both interfaces can have
3782 * the same MAC address.
3783 */
3784 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003785 /* Reroute all packets to our Interface.
3786 * They may have been routed to MPI firmware
3787 * due to WOL.
3788 */
3789 value = ql_read32(qdev, MGMT_RCV_CFG);
3790 value &= ~MGMT_RCV_CFG_RM;
3791 mask = 0xffff0000;
3792
3793 /* Sticky reg needs clearing due to WOL. */
3794 ql_write32(qdev, MGMT_RCV_CFG, mask);
3795 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3796
 3797 /* Default WOL is enabled on Mezz cards */
3798 if (qdev->pdev->subsystem_device == 0x0068 ||
3799 qdev->pdev->subsystem_device == 0x0180)
3800 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003801
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003802 /* Start up the rx queues. */
3803 for (i = 0; i < qdev->rx_ring_count; i++) {
3804 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3805 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003806 netif_err(qdev, ifup, qdev->ndev,
3807 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 return status;
3809 }
3810 }
3811
3812 /* If there is more than one inbound completion queue
3813 * then download a RICB to configure RSS.
3814 */
3815 if (qdev->rss_ring_count > 1) {
3816 status = ql_start_rss(qdev);
3817 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003818 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003819 return status;
3820 }
3821 }
3822
3823 /* Start up the tx queues. */
3824 for (i = 0; i < qdev->tx_ring_count; i++) {
3825 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3826 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003827 netif_err(qdev, ifup, qdev->ndev,
3828 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003829 return status;
3830 }
3831 }
3832
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003833 /* Initialize the port and set the max framesize. */
3834 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003835 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003836 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003837
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003838 /* Set up the MAC address and frame routing filter. */
3839 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003840 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003841 netif_err(qdev, ifup, qdev->ndev,
3842 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003843 return status;
3844 }
3845
3846 /* Start NAPI for the RSS queues. */
Jitendra Kalsaria19257f52012-02-03 14:06:50 +00003847 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003848 napi_enable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003849
3850 return status;
3851}
3852
3853/* Issue soft reset to chip. */
3854static int ql_adapter_reset(struct ql_adapter *qdev)
3855{
3856 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003857 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003858 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003859
Ron Mercera5f59dc2009-07-02 06:06:07 +00003860 /* Clear all the entries in the routing table. */
3861 status = ql_clear_routing_entries(qdev);
3862 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003863 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003864 return status;
3865 }
3866
3867 end_jiffies = jiffies +
3868 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003869
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003870 /* If the recovery bit is set, skip the mailbox command and
 3871 * clear the bit; otherwise we are in the normal reset process.
3872 */
3873 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3874 /* Stop management traffic. */
3875 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003876
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003877 /* Wait for the NIC and MGMNT FIFOs to empty. */
3878 ql_wait_fifo_empty(qdev);
3879 } else
3880 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003881
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003882 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003883
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003884 do {
3885 value = ql_read32(qdev, RST_FO);
3886 if ((value & RST_FO_FR) == 0)
3887 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003888 cpu_relax();
3889 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003890
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003891 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003892 netif_err(qdev, ifdown, qdev->ndev,
3893 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003894 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003895 }
3896
Ron Mercer84087f42009-10-08 09:54:41 +00003897 /* Resume management traffic. */
3898 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003899 return status;
3900}
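/* Illustrative sketch, not part of the driver: the bounded-poll pattern
 * used above. Testing against a jiffies deadline turns a reset bit that
 * never clears into -ETIMEDOUT instead of a hang. The callback-based
 * helper is hypothetical.
 */
static int qlge_poll_bit_clear(u32 (*read_reg)(void *ctx), void *ctx,
                               u32 bit, unsigned long deadline)
{
        while (time_before(jiffies, deadline)) {
                if (!(read_reg(ctx) & bit))
                        return 0;
                cpu_relax();
        }
        return -ETIMEDOUT;
}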
3901
3902static void ql_display_dev_info(struct net_device *ndev)
3903{
Joe Perchesb16fed02010-11-15 11:12:28 +00003904 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003905
Joe Perchesae9540f72010-02-09 11:49:52 +00003906 netif_info(qdev, probe, qdev->ndev,
3907 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3908 "XG Roll = %d, XG Rev = %d.\n",
3909 qdev->func,
3910 qdev->port,
3911 qdev->chip_rev_id & 0x0000000f,
3912 qdev->chip_rev_id >> 4 & 0x0000000f,
3913 qdev->chip_rev_id >> 8 & 0x0000000f,
3914 qdev->chip_rev_id >> 12 & 0x0000000f);
3915 netif_info(qdev, probe, qdev->ndev,
3916 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003917}
3918
stephen hemmingerac409212010-10-21 07:50:54 +00003919static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003920{
3921 int status = 0;
3922 u32 wol = MB_WOL_DISABLE;
3923
3924 /* The CAM is still intact after a reset, but if we
3925 * are doing WOL, then we may need to program the
3926 * routing regs. We would also need to issue the mailbox
3927 * commands to instruct the MPI what to do per the ethtool
3928 * settings.
3929 */
3930
3931 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3932 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003933 netif_err(qdev, ifdown, qdev->ndev,
Masanari Iidafd9071e2012-04-13 04:33:20 +00003934 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00003935 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003936 return -EINVAL;
3937 }
3938
3939 if (qdev->wol & WAKE_MAGIC) {
3940 status = ql_mb_wol_set_magic(qdev, 1);
3941 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003942 netif_err(qdev, ifdown, qdev->ndev,
3943 "Failed to set magic packet on %s.\n",
3944 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003945 return status;
3946 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003947 netif_info(qdev, drv, qdev->ndev,
3948 "Enabled magic packet successfully on %s.\n",
3949 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003950
3951 wol |= MB_WOL_MAGIC_PKT;
3952 }
3953
3954 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003955 wol |= MB_WOL_MODE_ON;
3956 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003957 netif_err(qdev, drv, qdev->ndev,
3958 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003959 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003960 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003961 }
3962
3963 return status;
3964}
3965
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003966static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003967{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003968
Ron Mercer6497b602009-02-12 16:37:13 -08003969 /* Don't kill the reset worker thread if we
3970 * are in the process of recovery.
3971 */
3972 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3973 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003974 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3975 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003976 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003977 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003978 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003979}
3980
3981static int ql_adapter_down(struct ql_adapter *qdev)
3982{
3983 int i, status = 0;
3984
3985 ql_link_off(qdev);
3986
3987 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003988
Ron Mercer39aa8162009-08-27 11:02:11 +00003989 for (i = 0; i < qdev->rss_ring_count; i++)
3990 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003991
3992 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3993
3994 ql_disable_interrupts(qdev);
3995
3996 ql_tx_ring_clean(qdev);
3997
Ron Mercer6b318cb2009-03-09 10:59:26 +00003998 /* Call netif_napi_del() from a common point.
3999 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004000 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00004001 netif_napi_del(&qdev->rx_ring[i].napi);
4002
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004003 status = ql_adapter_reset(qdev);
4004 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004005 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4006 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00004007 ql_free_rx_buffers(qdev);
4008
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004009 return status;
4010}
4011
4012static int ql_adapter_up(struct ql_adapter *qdev)
4013{
4014 int err = 0;
4015
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004016 err = ql_adapter_initialize(qdev);
4017 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004018 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004019 goto err_init;
4020 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004021 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00004022 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00004023 /* If the port is initialized and the
 4024 * link is up then turn on the carrier.
4025 */
4026 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4027 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00004028 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00004029 /* Restore rx mode. */
4030 clear_bit(QL_ALLMULTI, &qdev->flags);
4031 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4032 qlge_set_multicast_list(qdev->ndev);
4033
Ron Mercerc1b60092010-10-27 04:58:12 +00004034 /* Restore vlan setting. */
4035 qlge_restore_vlan(qdev);
4036
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004037 ql_enable_interrupts(qdev);
4038 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004039 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004040
4041 return 0;
4042err_init:
4043 ql_adapter_reset(qdev);
4044 return err;
4045}
4046
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004047static void ql_release_adapter_resources(struct ql_adapter *qdev)
4048{
4049 ql_free_mem_resources(qdev);
4050 ql_free_irq(qdev);
4051}
4052
4053static int ql_get_adapter_resources(struct ql_adapter *qdev)
4054{
4055 int status = 0;
4056
4057 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004058 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004059 return -ENOMEM;
4060 }
4061 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004062 return status;
4063}
4064
4065static int qlge_close(struct net_device *ndev)
4066{
4067 struct ql_adapter *qdev = netdev_priv(ndev);
4068
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004069 /* If we hit pci_channel_io_perm_failure
4070 * failure condition, then we already
4071 * brought the adapter down.
4072 */
4073 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004074 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004075 clear_bit(QL_EEH_FATAL, &qdev->flags);
4076 return 0;
4077 }
4078
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004079 /*
4080 * Wait for device to recover from a reset.
4081 * (Rarely happens, but possible.)
4082 */
4083 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4084 msleep(1);
4085 ql_adapter_down(qdev);
4086 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004087 return 0;
4088}
4089
4090static int ql_configure_rings(struct ql_adapter *qdev)
4091{
4092 int i;
4093 struct rx_ring *rx_ring;
4094 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004095 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004096 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4097 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4098
4099 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004100
Ron Mercera4ab6132009-08-27 11:02:10 +00004101 /* In a perfect world we have one RSS ring for each CPU
 4102 * and each has its own vector. To do that we ask for
4103 * cpu_cnt vectors. ql_enable_msix() will adjust the
4104 * vector count to what we actually get. We then
4105 * allocate an RSS ring for each.
4106 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004107 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004108 qdev->intr_count = cpu_cnt;
4109 ql_enable_msix(qdev);
4110 /* Adjust the RSS ring count to the actual vector count. */
4111 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004112 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004113 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004114
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004115 for (i = 0; i < qdev->tx_ring_count; i++) {
4116 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004117 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004118 tx_ring->qdev = qdev;
4119 tx_ring->wq_id = i;
4120 tx_ring->wq_len = qdev->tx_ring_size;
4121 tx_ring->wq_size =
4122 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4123
4124 /*
 4125 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004126 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004127 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004128 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129 }

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
				rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
				rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}
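
/*
 * Worked example (illustrative only): on a 4-CPU system that is
 * granted all four MSI-X vectors, this yields rss_ring_count = 4,
 * tx_ring_count = 4 and rx_ring_count = 8; cq_ids 0-3 are the inbound
 * RSS rings and cq_ids 4-7 are the outbound completion rings serving
 * tx rings 0-3.
 */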
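/* ndo_open handler: reset the chip, size the rings for the current
 * MTU, then allocate resources and bring the adapter up.
 */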
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

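/* Bounce the adapter so the RSS rings pick up the large-buffer size
 * that matches the new MTU.  Called only while the device is running.
 */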
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	u32 lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int wait_count = 3;

		while (wait_count-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		/* Re-test the flag rather than the loop counter: the
		 * counter ends at -1 after a full timeout and at 0 when
		 * the adapter came up on the last try.
		 */
		if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

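/* ndo_change_mtu handler.  The hardware is only configured for 1500 or
 * 9000 byte frames, so any other MTU is rejected.
 */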
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");

	return status;
}

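/* ndo_get_stats handler: fold the per-ring counters into the single
 * netdev stats structure.
 */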
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

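/* ndo_set_rx_mode handler: program the routing registers across
 * promiscuous/all-multi transitions and (re)load the multicast
 * address list into the CAM.
 */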
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

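/* ndo_set_mac_address handler: validate the new address, cache it
 * locally and program it into the CAM for this function.
 */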
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

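/* Worker invoked after a fatal ASIC error (e.g. from the TX timeout
 * handler above): cycle the adapter down and back up under the RTNL
 * lock, then restore the receive mode.
 */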
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share common
 * firmware, we have the lowest enabled function do any
 * common work.  Examples would be resetting after a fatal
 * firmware error, or doing a firmware coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

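/* Read the function ID out of the STS register, derive the port and
 * the per-function semaphore/mailbox register constants from it, and
 * select the nic_ops that match the device ID (8012 vs. 8000).
 */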
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
		(ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

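/* Undo everything ql_init_device() set up: the workqueue, the
 * register mappings, the optional coredump buffer and the PCI regions.
 */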
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}

static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		/* Disable the device on this error path; a bare return
		 * here would leak the enabled PCI device.
		 */
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap_nocache(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap_nocache(pci_resource_start(pdev, 3),
				pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_fix_features = qlge_fix_features,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};

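/* Periodic heartbeat.  Reading the STS register while the PCI channel
 * is dead is what lets EEH detect the failure on platforms that
 * support it; if the channel has gone offline the timer is simply not
 * rearmed.
 */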
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

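/* PCI probe entry point: allocate the multiqueue netdev, initialize
 * the PCI and board state, advertise features, register the netdev
 * and start the EEH heartbeat timer.
 */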
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

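/* Thin wrappers exported for the ethtool loopback self-test (see
 * qlge_ethtool.c); they reuse the normal send and rx-clean paths.
 */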
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

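/* PCI remove entry point: stop the timer and all outstanding work
 * before tearing the device down in the reverse order of probe.
 */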
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

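/* Suspend path, also used by qlge_shutdown(): bring the adapter down
 * if it is running, arm wake-on-LAN and put the device into the
 * requested power state.
 */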
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

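/*
 * module_pci_driver() generates the trivial module init/exit pair,
 * roughly equivalent to (function names illustrative):
 *
 *	static int __init qlge_driver_init(void)
 *	{
 *		return pci_register_driver(&qlge_driver);
 *	}
 *	module_init(qlge_driver_init);
 *
 *	static void __exit qlge_driver_exit(void)
 *	{
 *		pci_unregister_driver(&qlge_driver);
 *	}
 *	module_exit(qlge_driver_exit);
 */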
module_pci_driver(qlge_driver);