/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
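
/* Typical calling pattern for the semaphore helpers above (an
 * illustrative sketch, not a new code path; ql_set_mac_addr() later in
 * this file is a real caller of exactly this shape):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;	// firmware/FC driver held it too long
 *	// ... touch the shared MAC_ADDR_IDX/MAC_ADDR_DATA registers ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */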

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
168int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
169{
170 u32 temp;
171 int count = UDELAY_COUNT;
172
173 while (count) {
174 temp = ql_read32(qdev, reg);
175
176 /* check for errors */
177 if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
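
/* Usage sketch (illustrative only): the indirect register interfaces in
 * this driver poll a ready bit before and after triggering an access,
 * e.g.
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY,
 *				 FLASH_ADDR_ERR);
 *	if (status)
 *		return status;
 *	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 *
 * as ql_read_flash_word() does further below.
 */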

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto unmap;	/* don't leak the DMA mapping made above */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
unmap:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
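
/* Illustrative call (a sketch of what the ring-setup code elsewhere in
 * this driver does): downloading a completion-queue init control block
 * for queue id 0 would look like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ, 0);
 *
 * where cqicb points at a DMA-able struct cqicb.
 */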

/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
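
/* Worked example of the byte packing above (illustrative numbers): for
 * the multicast address 01:00:5e:00:00:fb,
 *
 *	upper = (0x01 << 8) | 0x00              = 0x00000100
 *	lower = (0x5e << 24) | (0x00 << 16) |
 *		(0x00 << 8)  | 0xfb             = 0x5e0000fb
 *
 * i.e. the first two octets land in the "upper" register word and the
 * remaining four in "lower", matching the hardware's CAM layout.
 */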

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
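
/* Illustrative use (a sketch of what the driver's route-initialization
 * code does elsewhere): enabling the broadcast slot so broadcasts go to
 * the default queue would be
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *
 * while passing enable == 0 clears the entry's E-bit instead.
 */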

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
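
/* The checksum convention here (worked example, illustrative): a flash
 * image is valid when all of its little-endian 16-bit words sum to zero
 * mod 2^16.  E.g. if the payload words sum to 0xfffe, the stored check
 * word must be 0x0002, since 0xfffe + 0x0002 == 0x0000 in u16
 * arithmetic, so the function returns 0 for good images.
 */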

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
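
/* Reading one 64-bit XGMAC statistic is therefore two dependent 32-bit
 * indirect reads (illustrative sketch; "stat_reg" is a placeholder
 * offset, not a name from this driver):
 *
 *	u64 frames;
 *	if (ql_read_xgmac_reg64(qdev, stat_reg, &frames) == 0)
 *		;	// low word from stat_reg, high word from stat_reg + 4
 *
 * Callers are expected to hold the xgmac semaphore around such access,
 * as ql_8012_port_initialize() below demonstrates for 32-bit accesses.
 */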

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
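
/* Worked example of the block-size math (illustrative numbers): with
 * PAGE_SIZE == 4096 and lbq_buf_order == 1, each master "page chunk"
 * block is 4096 << 1 == 8192 bytes; if lbq_buf_size is 2048, the block
 * is carved into four receive buffers before a fresh block must be
 * allocated (see ql_get_next_chunk() below).
 */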

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
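
/* Reference-count sketch for the chunking above (continuing the
 * illustrative 8192-byte block / 2048-byte buffer example): the
 * allocation starts with one page reference; every chunk handed out
 * except the last takes an extra get_page(), so four chunks hold four
 * references in total.  Each consumer's put_page() drops one, and the
 * DMA mapping is torn down exactly once, when the chunk flagged
 * last_flag is consumed in ql_get_curr_lchunk() above.
 */
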
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
						"Could not get a page chunk, i=%d, clean_idx =%d .\n",
						i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be 1
	 * (just the skb->data area), so ql_unmap_send() unmaps only
	 * that.  Otherwise we pass in the number of segments that
	 * mapped successfully so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
	default:
		break;
	}
}

/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
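
/* Worked example for the length adjustment above (illustrative): a
 * single-tagged frame makes the effective MAC header
 * ETH_HLEN + VLAN_HLEN == 14 + 4 == 18 bytes, while a stacked (QinQ)
 * frame yields 14 + 8 == 22 bytes.  Callers start from hlen = ETH_HLEN,
 * as ql_process_mac_rx_page() does below.
 */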
1491
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}

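/* Non-TCP page-chunk path: copy just the MAC header into the skb's
 * linear area (the stack expects to find it there) and attach the
 * rest of the page chunk as a paged fragment.
 */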
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length. */
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too large, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, hlen), addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen,
			   length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

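/* Small-buffer path: the whole frame fits in one small buffer, so it
 * is copied into a freshly allocated skb and the original small
 * buffer is left in place so it can be recycled to the hardware.
 */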
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

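/* The chip reports where it placed a frame through the flags in the
 * IOCB: the header may be split into a small buffer (HV/HS), and the
 * data may sit in a small buffer (DS), a single large-buffer page
 * chunk (DL), or a chain of large buffers described by an sg list in
 * a small buffer. ql_build_rx_skb() below handles each combination.
 */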
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer.
			 * Allocate a new skb, chain the page to it and pull
			 * the headers into the linear area. This can happen
			 * with jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
					      lbq_desc->p.pg_chunk.va,
					      &hlen);
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
				      &hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

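/* Top-level receive dispatch. A vlan_id of 0xffff is the "no tag"
 * sentinel used by the handlers above: a tag is only extracted here
 * when the IOCB carries one and hardware vlan stripping is enabled.
 */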
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

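/* TX completions arrive on an outbound completion ring. Each one
 * identifies its transmit ring (txq_idx) and descriptor (tid) so the
 * matching DMA mappings can be torn down and the skb freed; the
 * descriptor credit is returned to tx_count afterwards.
 */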
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate to the reset process that
	 * we are in fatal error recovery rather than a normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

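/* Drain one outbound (TX) completion ring, then wake the matching
 * transmit subqueue if it had been stopped and at least a quarter of
 * its work queue entries are free again.
 */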
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	struct tx_ring *tx_ring;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

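/* NAPI poll routine used in MSI-X mode. Each vector first services
 * any TX completion rings that share its irq_mask (they are laid out
 * after the RSS rings in qdev->rx_ring[]), then cleans its own RSS
 * ring against the budget.
 */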
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

/**
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status = 0;

	status = ql_adapter_down(qdev);
	if (status) {
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring down the adapter\n");
		return status;
	}

	/* update the features with the recent changes */
	ndev->features = features;

	status = ql_adapter_up(qdev);
	if (status) {
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring up the adapter\n");
		return status;
	}
	return status;
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	int err;

	/* Update the behavior of vlan accel in the adapter */
	err = qlge_update_hw_vlan_features(ndev, features);
	if (err)
		return err;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

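/* In MSI/legacy mode the interrupt line may be shared with other
 * devices; irq_cnt is non-zero while our completion interrupt is
 * masked, in which case a firing on a shared line is assumed to
 * belong to another device and is ignored below.
 */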
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

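/* For TSO the stack hands us an oversized TCP frame; the IOCB is
 * filled with the header lengths and MSS, and the TCP checksum field
 * is seeded with the pseudo-header checksum so the chip can finish
 * the per-segment checksums as it slices the frame.
 */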
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

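/* Main transmit entry point. The stack's queue_mapping selects the
 * tx_ring directly, one IOCB is built per skb (TSO and checksum
 * offload are set up by the helpers above), and the doorbell write
 * hands the producer index to the chip. The subqueue is stopped
 * whenever fewer than two descriptors remain.
 */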
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

2759static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2760{
2761 struct tx_ring_desc *tx_ring_desc;
2762 int i;
2763 struct ob_mac_iocb_req *mac_iocb_ptr;
2764
2765 mac_iocb_ptr = tx_ring->wq_base;
2766 tx_ring_desc = tx_ring->q;
2767 for (i = 0; i < tx_ring->wq_len; i++) {
2768 tx_ring_desc->index = i;
2769 tx_ring_desc->skb = NULL;
2770 tx_ring_desc->queue_entry = mac_iocb_ptr;
2771 mac_iocb_ptr++;
2772 tx_ring_desc++;
2773 }
2774 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002775}
2776
2777static void ql_free_tx_resources(struct ql_adapter *qdev,
2778 struct tx_ring *tx_ring)
2779{
2780 if (tx_ring->wq_base) {
2781 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2782 tx_ring->wq_base, tx_ring->wq_base_dma);
2783 tx_ring->wq_base = NULL;
2784 }
2785 kfree(tx_ring->q);
2786 tx_ring->q = NULL;
2787}
2788
2789static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2790 struct tx_ring *tx_ring)
2791{
2792 tx_ring->wq_base =
2793 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2794 &tx_ring->wq_base_dma);
2795
Joe Perches8e95a202009-12-03 07:58:21 +00002796 if ((tx_ring->wq_base == NULL) ||
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002797 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2798 goto pci_alloc_err;
2799
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002800 tx_ring->q =
2801 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2802 if (tx_ring->q == NULL)
2803 goto err;
2804
2805 return 0;
2806err:
2807 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2808 tx_ring->wq_base, tx_ring->wq_base_dma);
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002809 tx_ring->wq_base = NULL;
2810pci_alloc_err:
2811 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002812 return -ENOMEM;
2813}
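
/*
 * Illustrative note: the allocation above is rejected not only on NULL but
 * also when the returned DMA address is misaligned.  WQ_ADDR_ALIGN is
 * assumed here to be a mask of the low-order address bits the hardware
 * requires to be zero, so "wq_base_dma & WQ_ADDR_ALIGN" is non-zero
 * exactly when the alignment is violated.
 */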
2814
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002815static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002816{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002817 struct bq_desc *lbq_desc;
2818
Ron Mercer7c734352009-10-19 03:32:19 +00002819 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002820
Ron Mercer7c734352009-10-19 03:32:19 +00002821 curr_idx = rx_ring->lbq_curr_idx;
2822 clean_idx = rx_ring->lbq_clean_idx;
2823 while (curr_idx != clean_idx) {
2824 lbq_desc = &rx_ring->lbq[curr_idx];
2825
2826 if (lbq_desc->p.pg_chunk.last_flag) {
2827 pci_unmap_page(qdev->pdev,
2828 lbq_desc->p.pg_chunk.map,
2829 ql_lbq_block_size(qdev),
2830 PCI_DMA_FROMDEVICE);
2831 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002832 }
Ron Mercer7c734352009-10-19 03:32:19 +00002833
2834 put_page(lbq_desc->p.pg_chunk.page);
2835 lbq_desc->p.pg_chunk.page = NULL;
2836
2837 if (++curr_idx == rx_ring->lbq_len)
2838 curr_idx = 0;
2839
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002840 }
Thadeu Lima de Souza Cascardoef380792013-05-11 09:15:37 +00002841 if (rx_ring->pg_chunk.page) {
2842 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2843 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2844 put_page(rx_ring->pg_chunk.page);
2845 rx_ring->pg_chunk.page = NULL;
2846 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002847}
2848
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002849static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002850{
2851 int i;
2852 struct bq_desc *sbq_desc;
2853
2854 for (i = 0; i < rx_ring->sbq_len; i++) {
2855 sbq_desc = &rx_ring->sbq[i];
2856 if (sbq_desc == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002857 netif_err(qdev, ifup, qdev->ndev,
2858 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002859 return;
2860 }
2861 if (sbq_desc->p.skb) {
2862 pci_unmap_single(qdev->pdev,
FUJITA Tomonori64b9b412010-04-12 14:32:14 +00002863 dma_unmap_addr(sbq_desc, mapaddr),
2864 dma_unmap_len(sbq_desc, maplen),
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002865 PCI_DMA_FROMDEVICE);
2866 dev_kfree_skb(sbq_desc->p.skb);
2867 sbq_desc->p.skb = NULL;
2868 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002869 }
2870}
2871
Ron Mercer4545a3f2009-02-23 10:42:17 +00002872/* Free all large and small rx buffers associated
2873 * with the completion queues for this device.
2874 */
2875static void ql_free_rx_buffers(struct ql_adapter *qdev)
2876{
2877 int i;
2878 struct rx_ring *rx_ring;
2879
2880 for (i = 0; i < qdev->rx_ring_count; i++) {
2881 rx_ring = &qdev->rx_ring[i];
2882 if (rx_ring->lbq)
2883 ql_free_lbq_buffers(qdev, rx_ring);
2884 if (rx_ring->sbq)
2885 ql_free_sbq_buffers(qdev, rx_ring);
2886 }
2887}
2888
2889static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2890{
2891 struct rx_ring *rx_ring;
2892 int i;
2893
2894 for (i = 0; i < qdev->rx_ring_count; i++) {
2895 rx_ring = &qdev->rx_ring[i];
2896 if (rx_ring->type != TX_Q)
2897 ql_update_buffer_queues(qdev, rx_ring);
2898 }
2899}
2900
2901static void ql_init_lbq_ring(struct ql_adapter *qdev,
2902 struct rx_ring *rx_ring)
2903{
2904 int i;
2905 struct bq_desc *lbq_desc;
2906 __le64 *bq = rx_ring->lbq_base;
2907
2908 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2909 for (i = 0; i < rx_ring->lbq_len; i++) {
2910 lbq_desc = &rx_ring->lbq[i];
2911 memset(lbq_desc, 0, sizeof(*lbq_desc));
2912 lbq_desc->index = i;
2913 lbq_desc->addr = bq;
2914 bq++;
2915 }
2916}
2917
2918static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002919 struct rx_ring *rx_ring)
2920{
2921 int i;
2922 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002923 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002924
Ron Mercer4545a3f2009-02-23 10:42:17 +00002925 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002926 for (i = 0; i < rx_ring->sbq_len; i++) {
2927 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002928 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002929 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002930 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002931 bq++;
2932 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002933}
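
/*
 * Illustrative note: lbq/sbq initialization walks two parallel arrays --
 * the driver's control blocks (struct bq_desc) and the chip-visible queue
 * of little-endian addresses.  Descriptor i simply records a pointer to
 * slot i (desc->addr = &bq_base[i]); the slot itself is written with
 * cpu_to_le64(<buffer dma>) when buffers are posted later.
 */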
2934
2935static void ql_free_rx_resources(struct ql_adapter *qdev,
2936 struct rx_ring *rx_ring)
2937{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002938 /* Free the small buffer queue. */
2939 if (rx_ring->sbq_base) {
2940 pci_free_consistent(qdev->pdev,
2941 rx_ring->sbq_size,
2942 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2943 rx_ring->sbq_base = NULL;
2944 }
2945
2946 /* Free the small buffer queue control blocks. */
2947 kfree(rx_ring->sbq);
2948 rx_ring->sbq = NULL;
2949
2950 /* Free the large buffer queue. */
2951 if (rx_ring->lbq_base) {
2952 pci_free_consistent(qdev->pdev,
2953 rx_ring->lbq_size,
2954 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2955 rx_ring->lbq_base = NULL;
2956 }
2957
2958 /* Free the large buffer queue control blocks. */
2959 kfree(rx_ring->lbq);
2960 rx_ring->lbq = NULL;
2961
2962 /* Free the rx queue. */
2963 if (rx_ring->cq_base) {
2964 pci_free_consistent(qdev->pdev,
2965 rx_ring->cq_size,
2966 rx_ring->cq_base, rx_ring->cq_base_dma);
2967 rx_ring->cq_base = NULL;
2968 }
2969}
2970
2971/* Allocate queues and buffers for this completion queue based
2972 * on the values in the parameter structure. */
2973static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2974 struct rx_ring *rx_ring)
2975{
2976
2977 /*
2978 * Allocate the completion queue for this rx_ring.
2979 */
2980 rx_ring->cq_base =
2981 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2982 &rx_ring->cq_base_dma);
2983
2984 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002985 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002986 return -ENOMEM;
2987 }
2988
2989 if (rx_ring->sbq_len) {
2990 /*
2991 * Allocate small buffer queue.
2992 */
2993 rx_ring->sbq_base =
2994 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2995 &rx_ring->sbq_base_dma);
2996
2997 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00002998 netif_err(qdev, ifup, qdev->ndev,
2999 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003000 goto err_mem;
3001 }
3002
3003 /*
3004 * Allocate small buffer queue control blocks.
3005 */
Joe Perches14f8dc42013-02-07 11:46:27 +00003006 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3007 sizeof(struct bq_desc),
3008 GFP_KERNEL);
3009 if (rx_ring->sbq == NULL)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003010 goto err_mem;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003011
Ron Mercer4545a3f2009-02-23 10:42:17 +00003012 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003013 }
3014
3015 if (rx_ring->lbq_len) {
3016 /*
3017 * Allocate large buffer queue.
3018 */
3019 rx_ring->lbq_base =
3020 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3021 &rx_ring->lbq_base_dma);
3022
3023 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003024 netif_err(qdev, ifup, qdev->ndev,
3025 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003026 goto err_mem;
3027 }
3028 /*
3029 * Allocate large buffer queue control blocks.
3030 */
Joe Perches14f8dc42013-02-07 11:46:27 +00003031 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3032 sizeof(struct bq_desc),
3033 GFP_KERNEL);
3034 if (rx_ring->lbq == NULL)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003035 goto err_mem;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003036
Ron Mercer4545a3f2009-02-23 10:42:17 +00003037 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003038 }
3039
3040 return 0;
3041
3042err_mem:
3043 ql_free_rx_resources(qdev, rx_ring);
3044 return -ENOMEM;
3045}
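
/*
 * Illustrative note: kmalloc_array(n, size, flags) is used above instead
 * of kmalloc(n * size, flags) because it returns NULL on multiplication
 * overflow rather than silently allocating a short buffer.
 */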
3046
3047static void ql_tx_ring_clean(struct ql_adapter *qdev)
3048{
3049 struct tx_ring *tx_ring;
3050 struct tx_ring_desc *tx_ring_desc;
3051 int i, j;
3052
3053 /*
3054 * Loop through all queues and free
3055 * any resources.
3056 */
3057 for (j = 0; j < qdev->tx_ring_count; j++) {
3058 tx_ring = &qdev->tx_ring[j];
3059 for (i = 0; i < tx_ring->wq_len; i++) {
3060 tx_ring_desc = &tx_ring->q[i];
3061 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003062 netif_err(qdev, ifdown, qdev->ndev,
3063 "Freeing lost SKB %p, from queue %d, index %d.\n",
3064 tx_ring_desc->skb, j,
3065 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003066 ql_unmap_send(qdev, tx_ring_desc,
3067 tx_ring_desc->map_cnt);
3068 dev_kfree_skb(tx_ring_desc->skb);
3069 tx_ring_desc->skb = NULL;
3070 }
3071 }
3072 }
3073}
3074
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003075static void ql_free_mem_resources(struct ql_adapter *qdev)
3076{
3077 int i;
3078
3079 for (i = 0; i < qdev->tx_ring_count; i++)
3080 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3081 for (i = 0; i < qdev->rx_ring_count; i++)
3082 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3083 ql_free_shadow_space(qdev);
3084}
3085
3086static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3087{
3088 int i;
3089
3090 /* Allocate space for our shadow registers and such. */
3091 if (ql_alloc_shadow_space(qdev))
3092 return -ENOMEM;
3093
3094 for (i = 0; i < qdev->rx_ring_count; i++) {
3095 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003096 netif_err(qdev, ifup, qdev->ndev,
3097 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003098 goto err_mem;
3099 }
3100 }
3101 /* Allocate tx queue resources */
3102 for (i = 0; i < qdev->tx_ring_count; i++) {
3103 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003104 netif_err(qdev, ifup, qdev->ndev,
3105 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003106 goto err_mem;
3107 }
3108 }
3109 return 0;
3110
3111err_mem:
3112 ql_free_mem_resources(qdev);
3113 return -ENOMEM;
3114}
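
/*
 * Illustrative note: a single err_mem label suffices because
 * ql_free_mem_resources() is safe on a partially allocated state -- each
 * underlying free routine checks its pointer before acting (and kfree()
 * accepts NULL), so it can unwind however far the allocation pass got.
 */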
3115
3116/* Set up the rx ring control block and pass it to the chip.
3117 * The control block is defined as
3118 * "Completion Queue Initialization Control Block", or cqicb.
3119 */
3120static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3121{
3122 struct cqicb *cqicb = &rx_ring->cqicb;
3123 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003124 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003125 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003126 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003127 void __iomem *doorbell_area =
3128 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3129 int err = 0;
3130 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003131 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003132 __le64 *base_indirect_ptr;
3133 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003134
3135 /* Set up the shadow registers for this ring. */
3136 rx_ring->prod_idx_sh_reg = shadow_reg;
3137 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003138 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003139 shadow_reg += sizeof(u64);
3140 shadow_reg_dma += sizeof(u64);
3141 rx_ring->lbq_base_indirect = shadow_reg;
3142 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003143 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3144 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003145 rx_ring->sbq_base_indirect = shadow_reg;
3146 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3147
3148 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003149 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003150 rx_ring->cnsmr_idx = 0;
3151 rx_ring->curr_entry = rx_ring->cq_base;
3152
3153 /* PCI doorbell mem area + 0x04 for valid register */
3154 rx_ring->valid_db_reg = doorbell_area + 0x04;
3155
3156 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003157 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003158
3159 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003160 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003161
3162 memset((void *)cqicb, 0, sizeof(struct cqicb));
3163 cqicb->msix_vect = rx_ring->irq;
3164
Ron Mercer459caf52009-01-04 17:08:11 -08003165 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3166 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003167
Ron Mercer97345522009-01-09 11:31:50 +00003168 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003169
Ron Mercer97345522009-01-09 11:31:50 +00003170 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003171
3172 /*
3173 * Set up the control block load flags.
3174 */
3175 cqicb->flags = FLAGS_LC | /* Load queue base address */
3176 FLAGS_LV | /* Load MSI-X vector */
3177 FLAGS_LI; /* Load irq delay values */
3178 if (rx_ring->lbq_len) {
3179 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003180 tmp = (u64)rx_ring->lbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003181 base_indirect_ptr = rx_ring->lbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003182 page_entries = 0;
3183 do {
3184 *base_indirect_ptr = cpu_to_le64(tmp);
3185 tmp += DB_PAGE_SIZE;
3186 base_indirect_ptr++;
3187 page_entries++;
3188 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003189 cqicb->lbq_addr =
3190 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003191 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3192 (u16) rx_ring->lbq_buf_size;
3193 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3194 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3195 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003196 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003197 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003198 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003199 rx_ring->lbq_clean_idx = 0;
3200 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003201 }
3202 if (rx_ring->sbq_len) {
3203 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003204 tmp = (u64)rx_ring->sbq_base_dma;
Joe Perches43d620c2011-06-16 19:08:06 +00003205 base_indirect_ptr = rx_ring->sbq_base_indirect;
Ron Mercerb8facca2009-06-10 15:49:34 +00003206 page_entries = 0;
3207 do {
3208 *base_indirect_ptr = cpu_to_le64(tmp);
3209 tmp += DB_PAGE_SIZE;
3210 base_indirect_ptr++;
3211 page_entries++;
3212 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003213 cqicb->sbq_addr =
3214 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003215 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003216 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003217 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3218 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003219 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003220 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003221 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003222 rx_ring->sbq_clean_idx = 0;
3223 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003224 }
3225 switch (rx_ring->type) {
3226 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003227 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3228 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3229 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230 case RX_Q:
3231 /* Inbound completion handling rx_rings run in
3232 * separate NAPI contexts.
3233 */
3234 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3235 64);
3236 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3237 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3238 break;
3239 default:
Joe Perchesae9540f72010-02-09 11:49:52 +00003240 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3241 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003242 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003243 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3244 CFG_LCQ, rx_ring->cq_id);
3245 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003246 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003247 return err;
3248 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003249 return err;
3250}
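
/*
 * Illustrative sketch, not part of the driver: the chip reaches each
 * buffer queue through an indirect table of DB_PAGE_SIZE pages, built by
 * the do/while loops above.  The same fill, written as a plain loop over
 * a hypothetical page count:
 */
static void example_fill_indirect(__le64 *tbl, u64 base_dma, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		tbl[i] = cpu_to_le64(base_dma + (u64)i * DB_PAGE_SIZE);
}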
3251
3252static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3253{
3254 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3255 void __iomem *doorbell_area =
3256 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3257 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3258 (tx_ring->wq_id * sizeof(u64));
3259 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3260 (tx_ring->wq_id * sizeof(u64));
3261 int err = 0;
3262
3263 /*
3264 * Assign doorbell registers for this tx_ring.
3265 */
3266 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003267 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003268 tx_ring->prod_idx = 0;
3269 /* TX PCI doorbell mem area + 0x04 */
3270 tx_ring->valid_db_reg = doorbell_area + 0x04;
3271
3272 /*
3273 * Assign shadow registers for this tx_ring.
3274 */
3275 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3276 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3277
3278 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3279 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3280 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3281 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3282 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003283 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003284
Ron Mercer97345522009-01-09 11:31:50 +00003285 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003286
3287 ql_init_tx_ring(qdev, tx_ring);
3288
Ron Mercere3324712009-07-02 06:06:13 +00003289 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003290 (u16) tx_ring->wq_id);
3291 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003292 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003293 return err;
3294 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003295 return err;
3296}
3297
3298static void ql_disable_msix(struct ql_adapter *qdev)
3299{
3300 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3301 pci_disable_msix(qdev->pdev);
3302 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3303 kfree(qdev->msi_x_entry);
3304 qdev->msi_x_entry = NULL;
3305 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3306 pci_disable_msi(qdev->pdev);
3307 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3308 }
3309}
3310
Ron Mercera4ab6132009-08-27 11:02:10 +00003311/* We start by trying to get the number of vectors
3312 * stored in qdev->intr_count. If we don't get that
3313 * many then we reduce the count and try again.
3314 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003315static void ql_enable_msix(struct ql_adapter *qdev)
3316{
Ron Mercera4ab6132009-08-27 11:02:10 +00003317 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003318
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003319 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003320 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003321 /* Try to alloc space for the msix struct,
3322 * if it fails then go to MSI/legacy.
3323 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003324 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003325 sizeof(struct msix_entry),
3326 GFP_KERNEL);
3327 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003328 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003329 goto msi;
3330 }
3331
Ron Mercera4ab6132009-08-27 11:02:10 +00003332 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003333 qdev->msi_x_entry[i].entry = i;
3334
Ron Mercera4ab6132009-08-27 11:02:10 +00003335 /* Loop to get our vectors. We start with
3336 * what we want and settle for what we get.
3337 */
3338 do {
3339 err = pci_enable_msix(qdev->pdev,
3340 qdev->msi_x_entry, qdev->intr_count);
3341 if (err > 0)
3342 qdev->intr_count = err;
3343 } while (err > 0);
3344
3345 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003346 kfree(qdev->msi_x_entry);
3347 qdev->msi_x_entry = NULL;
Joe Perchesae9540f72010-02-09 11:49:52 +00003348 netif_warn(qdev, ifup, qdev->ndev,
3349 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003350 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003351 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003352 } else if (err == 0) {
3353 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003354 netif_info(qdev, ifup, qdev->ndev,
3355 "MSI-X Enabled, got %d vectors.\n",
3356 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003357 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003358 }
3359 }
3360msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003361 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003362 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003363 if (!pci_enable_msi(qdev->pdev)) {
3364 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f72010-02-09 11:49:52 +00003365 netif_info(qdev, ifup, qdev->ndev,
3366 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003367 return;
3368 }
3369 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003370 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f72010-02-09 11:49:52 +00003371 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3372 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003373}
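
/*
 * Illustrative note: pci_enable_msix() of this era returns 0 on success,
 * a positive count when fewer vectors are available (the number to retry
 * with, which is exactly how the loop above steps qdev->intr_count down),
 * or a negative errno on hard failure, at which point the driver falls
 * back to MSI and finally to legacy interrupts.
 */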
3374
Ron Mercer39aa8162009-08-27 11:02:11 +00003375/* Each vector services 1 RSS ring and 1 or more
3376 * TX completion rings. This function loops through
3377 * the TX completion rings and assigns the vector that
3378 * will service it. An example would be if there are
3379 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3380 * This would mean that vector 0 would service RSS ring 0
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003381 * and TX completion rings 0,1,2 and 3. Vector 1 would
Ron Mercer39aa8162009-08-27 11:02:11 +00003382 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3383 */
3384static void ql_set_tx_vect(struct ql_adapter *qdev)
3385{
3386 int i, j, vect;
3387 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3388
3389 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3390 /* Assign irq vectors to TX rx_rings.*/
3391 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3392 i < qdev->rx_ring_count; i++) {
3393 if (j == tx_rings_per_vector) {
3394 vect++;
3395 j = 0;
3396 }
3397 qdev->rx_ring[i].irq = vect;
3398 j++;
3399 }
3400 } else {
3401 /* For single vector all rings have an irq
3402 * of zero.
3403 */
3404 for (i = 0; i < qdev->rx_ring_count; i++)
3405 qdev->rx_ring[i].irq = 0;
3406 }
3407}
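
/*
 * Illustrative example: with 2 vectors and 8 TX completion rings,
 * tx_rings_per_vector is 4, so the loop above gives vector 0 to TX
 * completion rings 0-3 and vector 1 to rings 4-7.  For a 0-based TX
 * completion ring index t this reduces to:
 *
 *	irq = t / tx_rings_per_vector;
 */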
3408
3409/* Set the interrupt mask for this vector. Each vector
3410 * will service 1 RSS ring and 1 or more TX completion
3411 * rings. This function sets up a bit mask per vector
3412 * that indicates which rings it services.
3413 */
3414static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3415{
3416 int j, vect = ctx->intr;
3417 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3418
3419 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3420 /* Add the RSS ring serviced by this vector
3421 * to the mask.
3422 */
3423 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3424 /* Add the TX ring(s) serviced by this vector
3425 * to the mask. */
3426 for (j = 0; j < tx_rings_per_vector; j++) {
3427 ctx->irq_mask |=
3428 (1 << qdev->rx_ring[qdev->rss_ring_count +
3429 (vect * tx_rings_per_vector) + j].cq_id);
3430 }
3431 } else {
3432 /* For single vector we just shift each queue's
3433 * ID into the mask.
3434 */
3435 for (j = 0; j < qdev->rx_ring_count; j++)
3436 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3437 }
3438}
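
/*
 * Illustrative example: continuing the 2-vector/8-TX-ring case, the RSS
 * rings hold cq_ids 0-1 and the TX completion rings hold cq_ids 2-9, so
 * the masks built above work out to:
 *
 *	vector 0: (1 << 0) | bits 2-5 = 0x03d
 *	vector 1: (1 << 1) | bits 6-9 = 0x3c2
 */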
3439
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003440/*
3441 * Here we build the intr_context structures based on
3442 * our rx_ring count and intr vector count.
3443 * The intr_context structure is used to hook each vector
3444 * to possibly different handlers.
3445 */
3446static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3447{
3448 int i = 0;
3449 struct intr_context *intr_context = &qdev->intr_context[0];
3450
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003451 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3452 /* Each rx_ring has its
3453 * own intr_context since we have separate
3454 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003455 */
3456 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3457 qdev->rx_ring[i].irq = i;
3458 intr_context->intr = i;
3459 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003460 /* Set up this vector's bit-mask that indicates
3461 * which queues it services.
3462 */
3463 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003464 /*
3465 * We set up each vector's enable/disable/read bits so
3466 * there's no bit/mask calculations in the critical path.
3467 */
3468 intr_context->intr_en_mask =
3469 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3470 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3471 | i;
3472 intr_context->intr_dis_mask =
3473 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3474 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3475 INTR_EN_IHD | i;
3476 intr_context->intr_read_mask =
3477 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3478 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3479 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003480 if (i == 0) {
3481 /* The first vector/queue handles
3482 * broadcast/multicast, fatal errors,
3483 * and firmware events. This in addition
3484 * to normal inbound NAPI processing.
3485 */
3486 intr_context->handler = qlge_isr;
3487 sprintf(intr_context->name, "%s-rx-%d",
3488 qdev->ndev->name, i);
3489 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003490 /*
3491 * Inbound queues handle unicast frames only.
3492 */
3493 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003494 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003495 qdev->ndev->name, i);
3496 }
3497 }
3498 } else {
3499 /*
3500 * All rx_rings use the same intr_context since
3501 * there is only one vector.
3502 */
3503 intr_context->intr = 0;
3504 intr_context->qdev = qdev;
3505 /*
3506 * We set up each vector's enable/disable/read bits so
3507 * there's no bit/mask calculations in the critical path.
3508 */
3509 intr_context->intr_en_mask =
3510 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3511 intr_context->intr_dis_mask =
3512 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3513 INTR_EN_TYPE_DISABLE;
3514 intr_context->intr_read_mask =
3515 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3516 /*
3517 * Single interrupt means one handler for all rings.
3518 */
3519 intr_context->handler = qlge_isr;
3520 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003521 /* Set up this vector's bit-mask that indicates
3522 * which queues it services. In this case there is
3523 * a single vector so it will service all RSS and
3524 * TX completion rings.
3525 */
3526 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003527 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003528 /* Tell the TX completion rings which MSIx vector
3529 * they will be using.
3530 */
3531 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003532}
3533
3534static void ql_free_irq(struct ql_adapter *qdev)
3535{
3536 int i;
3537 struct intr_context *intr_context = &qdev->intr_context[0];
3538
3539 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3540 if (intr_context->hooked) {
3541 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3542 free_irq(qdev->msi_x_entry[i].vector,
3543 &qdev->rx_ring[i]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003544 } else {
3545 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003546 }
3547 }
3548 }
3549 ql_disable_msix(qdev);
3550}
3551
3552static int ql_request_irq(struct ql_adapter *qdev)
3553{
3554 int i;
3555 int status = 0;
3556 struct pci_dev *pdev = qdev->pdev;
3557 struct intr_context *intr_context = &qdev->intr_context[0];
3558
3559 ql_resolve_queues_to_irqs(qdev);
3560
3561 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3562 atomic_set(&intr_context->irq_cnt, 0);
3563 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3564 status = request_irq(qdev->msi_x_entry[i].vector,
3565 intr_context->handler,
3566 0,
3567 intr_context->name,
3568 &qdev->rx_ring[i]);
3569 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003570 netif_err(qdev, ifup, qdev->ndev,
3571 "Failed request for MSIX interrupt %d.\n",
3572 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003573 goto err_irq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003574 }
3575 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00003576 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3577 "trying msi or legacy interrupts.\n");
3578 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3579 "%s: irq = %d.\n", __func__, pdev->irq);
3580 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3581 "%s: context->name = %s.\n", __func__,
3582 intr_context->name);
3583 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3584 "%s: dev_id = 0x%p.\n", __func__,
3585 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586 status =
3587 request_irq(pdev->irq, qlge_isr,
3588 test_bit(QL_MSI_ENABLED,
3589 &qdev->flags)
3590 ? 0 : IRQF_SHARED,
3591 intr_context->name, &qdev->rx_ring[0]);
3592 if (status)
3593 goto err_irq;
3594
Joe Perchesae9540f72010-02-09 11:49:52 +00003595 netif_err(qdev, ifup, qdev->ndev,
3596 "Hooked intr %d, queue type %s, with name %s.\n",
3597 i,
3598 qdev->rx_ring[0].type == DEFAULT_Q ?
3599 "DEFAULT_Q" :
3600 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3601 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3602 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003603 }
3604 intr_context->hooked = 1;
3605 }
3606 return status;
3607err_irq:
Joe Perchesae9540f72010-02-09 11:49:52 +00003608 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003609 ql_free_irq(qdev);
3610 return status;
3611}
3612
3613static int ql_start_rss(struct ql_adapter *qdev)
3614{
Joe Perches215faf92010-12-21 02:16:10 -08003615 static const u8 init_hash_seed[] = {
3616 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3617 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3618 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3619 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3620 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3621 };
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003622 struct ricb *ricb = &qdev->ricb;
3623 int status = 0;
3624 int i;
3625 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3626
Ron Mercere3324712009-07-02 06:06:13 +00003627 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003628
Ron Mercerb2014ff2009-08-27 11:02:09 +00003629 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003630 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003631 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3632 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003633
3634 /*
3635 * Fill out the Indirection Table.
3636 */
Ron Mercer541ae282009-10-08 09:54:37 +00003637 for (i = 0; i < 1024; i++)
3638 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003639
Ron Mercer541ae282009-10-08 09:54:37 +00003640 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3641 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003642
Ron Mercere3324712009-07-02 06:06:13 +00003643 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003644 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003645 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003646 return status;
3647 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003648 return status;
3649}
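
/*
 * Illustrative note: the indirection fill above assumes the RSS ring count
 * is a power of two, where "i & (n - 1)" equals "i % n".  With
 * rss_ring_count = 4 the 1024-entry table reads 0,1,2,3,0,1,2,3,...,
 * spreading hashed flows evenly across the four RSS completion queues;
 * the 0x3ff mask selects ten hash bits, i.e. the full 1024 entries.
 */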
3650
Ron Mercera5f59dc2009-07-02 06:06:07 +00003651static int ql_clear_routing_entries(struct ql_adapter *qdev)
3652{
3653 int i, status = 0;
3654
3655 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3656 if (status)
3657 return status;
3658 /* Clear all the entries in the routing table. */
3659 for (i = 0; i < 16; i++) {
3660 status = ql_set_routing_reg(qdev, i, 0, 0);
3661 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003664 break;
3665 }
3666 }
3667 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3668 return status;
3669}
3670
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003671/* Initialize the frame-to-queue routing. */
3672static int ql_route_initialize(struct ql_adapter *qdev)
3673{
3674 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003675
3676 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003677 status = ql_clear_routing_entries(qdev);
3678 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003679 return status;
3680
3681 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3682 if (status)
3683 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003684
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003685 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3686 RT_IDX_IP_CSUM_ERR, 1);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003687 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003688 netif_err(qdev, ifup, qdev->ndev,
Ron Mercerfbc2ac32010-07-05 12:19:41 +00003689 "Failed to init routing register "
3690 "for IP CSUM error packets.\n");
3691 goto exit;
3692 }
3693 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3694 RT_IDX_TU_CSUM_ERR, 1);
3695 if (status) {
3696 netif_err(qdev, ifup, qdev->ndev,
3697 "Failed to init routing register "
3698 "for TCP/UDP CSUM error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003699 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003700 }
3701 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3702 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003703 netif_err(qdev, ifup, qdev->ndev,
3704 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003705 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003706 }
3707 /* If we have more than one inbound queue, then turn on RSS in the
3708 * routing block.
3709 */
3710 if (qdev->rss_ring_count > 1) {
3711 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3712 RT_IDX_RSS_MATCH, 1);
3713 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003714 netif_err(qdev, ifup, qdev->ndev,
3715 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003716 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003717 }
3718 }
3719
3720 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3721 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003722 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003723 netif_err(qdev, ifup, qdev->ndev,
3724 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003725exit:
3726 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003727 return status;
3728}
3729
Ron Mercer2ee1e272009-03-03 12:10:33 +00003730int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003731{
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003732 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003733
Ron Mercer7fab3bfe2009-07-02 06:06:11 +00003734 /* Check if the link is up and use that to
3735 * determine if we are setting or clearing
3736 * the MAC address in the CAM.
3737 */
3738 set = ql_read32(qdev, STS);
3739 set &= qdev->port_link_up;
3740 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003741 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003742 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003743 return status;
3744 }
3745
3746 status = ql_route_initialize(qdev);
3747 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003748 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003749
3750 return status;
3751}
3752
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003753static int ql_adapter_initialize(struct ql_adapter *qdev)
3754{
3755 u32 value, mask;
3756 int i;
3757 int status = 0;
3758
3759 /*
3760 * Set up the System register to halt on errors.
3761 */
3762 value = SYS_EFE | SYS_FAE;
3763 mask = value << 16;
3764 ql_write32(qdev, SYS, mask | value);
3765
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003766 /* Set the default queue and VLAN behavior. */
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04003767 value = NIC_RCV_CFG_DFQ;
3768 mask = NIC_RCV_CFG_DFQ_MASK;
3769 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3770 value |= NIC_RCV_CFG_RV;
3771 mask |= (NIC_RCV_CFG_RV << 16);
3772 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003773 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3774
3775 /* Set the MPI interrupt to enabled. */
3776 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3777
3778 /* Enable the function, set pagesize, enable error checking. */
3779 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003780 FSC_EC | FSC_VM_PAGE_4K;
3781 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003782
3783 /* Set/clear header splitting. */
3784 mask = FSC_VM_PAGESIZE_MASK |
3785 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3786 ql_write32(qdev, FSC, mask | value);
3787
Ron Mercer572c5262010-01-02 10:37:42 +00003788 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003789
Ron Mercera3b71932009-10-08 09:54:38 +00003790 /* Set RX packet routing to use port/pci function on which the
3791 * packet arrived, in addition to usual frame routing.
3792 * This is helpful on bonding where both interfaces can have
3793 * the same MAC address.
3794 */
3795 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003796 /* Reroute all packets to our Interface.
3797 * They may have been routed to MPI firmware
3798 * due to WOL.
3799 */
3800 value = ql_read32(qdev, MGMT_RCV_CFG);
3801 value &= ~MGMT_RCV_CFG_RM;
3802 mask = 0xffff0000;
3803
3804 /* Sticky reg needs clearing due to WOL. */
3805 ql_write32(qdev, MGMT_RCV_CFG, mask);
3806 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3807
3808 /* Default WOL is enabled on Mezz cards */
3809 if (qdev->pdev->subsystem_device == 0x0068 ||
3810 qdev->pdev->subsystem_device == 0x0180)
3811 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003812
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003813 /* Start up the rx queues. */
3814 for (i = 0; i < qdev->rx_ring_count; i++) {
3815 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3816 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003817 netif_err(qdev, ifup, qdev->ndev,
3818 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003819 return status;
3820 }
3821 }
3822
3823 /* If there is more than one inbound completion queue
3824 * then download a RICB to configure RSS.
3825 */
3826 if (qdev->rss_ring_count > 1) {
3827 status = ql_start_rss(qdev);
3828 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003829 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003830 return status;
3831 }
3832 }
3833
3834 /* Start up the tx queues. */
3835 for (i = 0; i < qdev->tx_ring_count; i++) {
3836 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3837 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003838 netif_err(qdev, ifup, qdev->ndev,
3839 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003840 return status;
3841 }
3842 }
3843
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003844 /* Initialize the port and set the max framesize. */
3845 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003846 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00003847 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003848
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003849 /* Set up the MAC address and frame routing filter. */
3850 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003851 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003852 netif_err(qdev, ifup, qdev->ndev,
3853 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003854 return status;
3855 }
3856
3857 /* Start NAPI for the RSS queues. */
Jitendra Kalsaria19257f52012-02-03 14:06:50 +00003858 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003859 napi_enable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003860
3861 return status;
3862}
3863
3864/* Issue soft reset to chip. */
3865static int ql_adapter_reset(struct ql_adapter *qdev)
3866{
3867 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003868 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003869 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003870
Ron Mercera5f59dc2009-07-02 06:06:07 +00003871 /* Clear all the entries in the routing table. */
3872 status = ql_clear_routing_entries(qdev);
3873 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003874 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003875 return status;
3876 }
3877
3878 end_jiffies = jiffies +
3879 max((unsigned long)1, usecs_to_jiffies(30));
Ron Mercer84087f42009-10-08 09:54:41 +00003880
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003881 /* If the recovery bit is set, skip the mailbox command and
3882 * clear the bit; otherwise we are in the normal reset process.
3883 */
3884 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3885 /* Stop management traffic. */
3886 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
Ron Mercer84087f42009-10-08 09:54:41 +00003887
Jitendra Kalsariada92b392011-06-30 10:02:05 +00003888 /* Wait for the NIC and MGMNT FIFOs to empty. */
3889 ql_wait_fifo_empty(qdev);
3890 } else
3891 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
Ron Mercer84087f42009-10-08 09:54:41 +00003892
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003893 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003894
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003895 do {
3896 value = ql_read32(qdev, RST_FO);
3897 if ((value & RST_FO_FR) == 0)
3898 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003899 cpu_relax();
3900 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003901
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003902 if (value & RST_FO_FR) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003903 netif_err(qdev, ifdown, qdev->ndev,
3904 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003905 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003906 }
3907
Ron Mercer84087f42009-10-08 09:54:41 +00003908 /* Resume management traffic. */
3909 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003910 return status;
3911}
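
/*
 * Illustrative sketch, not part of the driver: the reset wait above is the
 * usual poll-until-clear-or-timeout idiom, generalized:
 */
static int example_poll_bit_clear(struct ql_adapter *qdev, u32 reg, u32 bit,
				  unsigned long timeout_us)
{
	unsigned long end = jiffies + max((unsigned long)1,
					  usecs_to_jiffies(timeout_us));

	do {
		if (!(ql_read32(qdev, reg) & bit))
			return 0;
		cpu_relax();
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}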
3912
3913static void ql_display_dev_info(struct net_device *ndev)
3914{
Joe Perchesb16fed02010-11-15 11:12:28 +00003915 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003916
Joe Perchesae9540f72010-02-09 11:49:52 +00003917 netif_info(qdev, probe, qdev->ndev,
3918 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3919 "XG Roll = %d, XG Rev = %d.\n",
3920 qdev->func,
3921 qdev->port,
3922 qdev->chip_rev_id & 0x0000000f,
3923 qdev->chip_rev_id >> 4 & 0x0000000f,
3924 qdev->chip_rev_id >> 8 & 0x0000000f,
3925 qdev->chip_rev_id >> 12 & 0x0000000f);
3926 netif_info(qdev, probe, qdev->ndev,
3927 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003928}
3929
stephen hemmingerac409212010-10-21 07:50:54 +00003930static int ql_wol(struct ql_adapter *qdev)
Ron Mercerbc083ce2009-10-21 11:07:40 +00003931{
3932 int status = 0;
3933 u32 wol = MB_WOL_DISABLE;
3934
3935 /* The CAM is still intact after a reset, but if we
3936 * are doing WOL, then we may need to program the
3937 * routing regs. We would also need to issue the mailbox
3938 * commands to instruct the MPI what to do per the ethtool
3939 * settings.
3940 */
3941
3942 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3943 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003944 netif_err(qdev, ifdown, qdev->ndev,
Masanari Iidafd9071e2012-04-13 04:33:20 +00003945 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
Joe Perchesae9540f72010-02-09 11:49:52 +00003946 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003947 return -EINVAL;
3948 }
3949
3950 if (qdev->wol & WAKE_MAGIC) {
3951 status = ql_mb_wol_set_magic(qdev, 1);
3952 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00003953 netif_err(qdev, ifdown, qdev->ndev,
3954 "Failed to set magic packet on %s.\n",
3955 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003956 return status;
3957 } else
Joe Perchesae9540f72010-02-09 11:49:52 +00003958 netif_info(qdev, drv, qdev->ndev,
3959 "Enabled magic packet successfully on %s.\n",
3960 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003961
3962 wol |= MB_WOL_MAGIC_PKT;
3963 }
3964
3965 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003966 wol |= MB_WOL_MODE_ON;
3967 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f72010-02-09 11:49:52 +00003968 netif_err(qdev, drv, qdev->ndev,
3969 "WOL %s (wol code 0x%x) on %s\n",
Jiri Kosina318ae2e2010-03-08 16:55:37 +01003970 (status == 0) ? "Successfully set" : "Failed",
Joe Perchesae9540f72010-02-09 11:49:52 +00003971 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003972 }
3973
3974 return status;
3975}
3976
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003977static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003978{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003979
Ron Mercer6497b602009-02-12 16:37:13 -08003980 /* Don't kill the reset worker thread if we
3981 * are in the process of recovery.
3982 */
3983 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3984 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003985 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3986 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003987 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003988 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003989 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00003990}
3991
3992static int ql_adapter_down(struct ql_adapter *qdev)
3993{
3994 int i, status = 0;
3995
3996 ql_link_off(qdev);
3997
3998 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003999
Ron Mercer39aa8162009-08-27 11:02:11 +00004000 for (i = 0; i < qdev->rss_ring_count; i++)
4001 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004002
4003 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4004
4005 ql_disable_interrupts(qdev);
4006
4007 ql_tx_ring_clean(qdev);
4008
Ron Mercer6b318cb2009-03-09 10:59:26 +00004009 /* Call netif_napi_del() from a common point.
4010 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004011 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00004012 netif_napi_del(&qdev->rx_ring[i].napi);
4013
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004014 status = ql_adapter_reset(qdev);
4015 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004016 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4017 qdev->func);
Breno Leitaofe5f0982010-08-26 08:27:58 +00004018 ql_free_rx_buffers(qdev);
4019
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004020 return status;
4021}
4022
4023static int ql_adapter_up(struct ql_adapter *qdev)
4024{
4025 int err = 0;
4026
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004027 err = ql_adapter_initialize(qdev);
4028 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004029 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004030 goto err_init;
4031 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004032 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00004033 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00004034 /* If the port is initialized and the
4035 * link is up then turn on the carrier.
4036 */
4037 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4038 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00004039 ql_link_on(qdev);
Ron Mercerf2c05002010-07-05 12:19:37 +00004040 /* Restore rx mode. */
4041 clear_bit(QL_ALLMULTI, &qdev->flags);
4042 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4043 qlge_set_multicast_list(qdev->ndev);
4044
Ron Mercerc1b60092010-10-27 04:58:12 +00004045 /* Restore vlan setting. */
4046 qlge_restore_vlan(qdev);
4047
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004048 ql_enable_interrupts(qdev);
4049 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00004050 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004051
4052 return 0;
4053err_init:
4054 ql_adapter_reset(qdev);
4055 return err;
4056}
4057
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004058static void ql_release_adapter_resources(struct ql_adapter *qdev)
4059{
4060 ql_free_mem_resources(qdev);
4061 ql_free_irq(qdev);
4062}
4063
4064static int ql_get_adapter_resources(struct ql_adapter *qdev)
4065{
4066 int status = 0;
4067
4068 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004069 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004070 return -ENOMEM;
4071 }
4072 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004073 return status;
4074}
4075
4076static int qlge_close(struct net_device *ndev)
4077{
4078 struct ql_adapter *qdev = netdev_priv(ndev);
4079
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004080 /* If we hit pci_channel_io_perm_failure
4081 * failure condition, then we already
4082 * brought the adapter down.
4083 */
4084 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004085 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004086 clear_bit(QL_EEH_FATAL, &qdev->flags);
4087 return 0;
4088 }
4089
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004090 /*
4091 * Wait for device to recover from a reset.
4092 * (Rarely happens, but possible.)
4093 */
4094 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4095 msleep(1);
4096 ql_adapter_down(qdev);
4097 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004098 return 0;
4099}
4100
4101static int ql_configure_rings(struct ql_adapter *qdev)
4102{
4103 int i;
4104 struct rx_ring *rx_ring;
4105 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00004106 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00004107 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4108 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4109
4110 qdev->lbq_buf_order = get_order(lbq_buf_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004111
Ron Mercera4ab6132009-08-27 11:02:10 +00004112 /* In a perfect world we have one RSS ring for each CPU
4113 * and each has it's own vector. To do that we ask for
4114 * cpu_cnt vectors. ql_enable_msix() will adjust the
4115 * vector count to what we actually get. We then
4116 * allocate an RSS ring for each.
4117 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004118 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004119 qdev->intr_count = cpu_cnt;
4120 ql_enable_msix(qdev);
4121 /* Adjust the RSS ring count to the actual vector count. */
4122 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004123 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004124 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004125
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004126 for (i = 0; i < qdev->tx_ring_count; i++) {
4127 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004128 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004129 tx_ring->qdev = qdev;
4130 tx_ring->wq_id = i;
4131 tx_ring->wq_len = qdev->tx_ring_size;
4132 tx_ring->wq_size =
4133 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4134
4135 /*
4136 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004137 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004138 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004139 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004140 }
4141
4142 for (i = 0; i < qdev->rx_ring_count; i++) {
4143 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004144 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004145 rx_ring->qdev = qdev;
4146 rx_ring->cq_id = i;
4147 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004148 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004149 /*
4150 * Inbound (RSS) queues.
4151 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004152 rx_ring->cq_len = qdev->rx_ring_size;
4153 rx_ring->cq_size =
4154 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4155 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4156 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004157 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004158 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004159 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4160 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004161 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004162 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004163 rx_ring->type = RX_Q;
4164 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004165 /*
4166 * Outbound queue handles outbound completions only.
4167 */
4168 /* The outbound cq is the same size as the tx_ring it services. */
4169 rx_ring->cq_len = qdev->tx_ring_size;
4170 rx_ring->cq_size =
4171 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4172 rx_ring->lbq_len = 0;
4173 rx_ring->lbq_size = 0;
4174 rx_ring->lbq_buf_size = 0;
4175 rx_ring->sbq_len = 0;
4176 rx_ring->sbq_size = 0;
4177 rx_ring->sbq_buf_size = 0;
4178 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004179 }
4180 }
4181 return 0;
4182}
4183
4184static int qlge_open(struct net_device *ndev)
4185{
4186 int err = 0;
4187 struct ql_adapter *qdev = netdev_priv(ndev);
4188
Ron Mercer74e12432009-11-11 12:54:04 +00004189 err = ql_adapter_reset(qdev);
4190 if (err)
4191 return err;
4192
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004193 err = ql_configure_rings(qdev);
4194 if (err)
4195 return err;
4196
4197 err = ql_get_adapter_resources(qdev);
4198 if (err)
4199 goto error_up;
4200
4201 err = ql_adapter_up(qdev);
4202 if (err)
4203 goto error_up;
4204
4205 return err;
4206
4207error_up:
4208 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004209 return err;
4210}
4211
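/*
 * Resize the large receive buffers after an MTU change: bring the
 * adapter down, recompute the buffer length and page order, apply the
 * new length to every RSS ring's lbq, then bring the adapter back up.
 * Illustrative sizing, assuming the usual qlge.h constants: a
 * 9000-byte MTU selects LARGE_BUFFER_MAX_SIZE, a 1500-byte MTU
 * LARGE_BUFFER_MIN_SIZE, and get_order() turns that length into the
 * matching page-allocation order.
 */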
Ron Mercer7c734352009-10-19 03:32:19 +00004212static int ql_change_rx_buffers(struct ql_adapter *qdev)
4213{
4214 struct rx_ring *rx_ring;
4215 int i, status;
4216 u32 lbq_buf_len;
4217
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004218 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004219 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4220 int i = 4;
4221 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004222 netif_err(qdev, ifup, qdev->ndev,
4223 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004224 ssleep(1);
4225 }
4226
4227 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004228 netif_err(qdev, ifup, qdev->ndev,
4229 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004230 return -ETIMEDOUT;
4231 }
4232 }
4233
4234 status = ql_adapter_down(qdev);
4235 if (status)
4236 goto error;
4237
4238 /* Get the new rx buffer size. */
4239 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4240 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4241 qdev->lbq_buf_order = get_order(lbq_buf_len);
4242
4243 for (i = 0; i < qdev->rss_ring_count; i++) {
4244 rx_ring = &qdev->rx_ring[i];
4245 /* Set the new size. */
4246 rx_ring->lbq_buf_size = lbq_buf_len;
4247 }
4248
4249 status = ql_adapter_up(qdev);
4250 if (status)
4251 goto error;
4252
4253 return status;
4254error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004255 netif_alert(qdev, ifup, qdev->ndev,
4256 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004257 set_bit(QL_ADAPTER_UP, &qdev->flags);
4258 dev_close(qdev->ndev);
4259 return status;
4260}
4261
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004262static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4263{
4264 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004265 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004266
4267 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004268 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004269 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004270 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004271 } else
4272 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004273
4274 queue_delayed_work(qdev->workqueue,
4275 &qdev->mpi_port_cfg_work, 3*HZ);
4276
Breno Leitao746079d2010-02-04 10:11:19 +00004277 ndev->mtu = new_mtu;
4278
Ron Mercer7c734352009-10-19 03:32:19 +00004279 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004280 return 0;
4281 }
4282
Ron Mercer7c734352009-10-19 03:32:19 +00004283 status = ql_change_rx_buffers(qdev);
4284 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004285 netif_err(qdev, ifup, qdev->ndev,
4286 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004287 }
4288
4289 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004290}
4291
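/*
 * Fold the per-ring software counters into the single netdev stats
 * structure.  Note (an observation from this function, not documented
 * behavior): the counters are read without taking qdev->stats_lock,
 * so the totals are a best-effort snapshot.
 */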
4292static struct net_device_stats *qlge_get_stats(struct net_device
4293 *ndev)
4294{
Ron Mercer885ee392009-11-03 13:49:31 +00004295 struct ql_adapter *qdev = netdev_priv(ndev);
4296 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4297 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4298 unsigned long pkts, mcast, dropped, errors, bytes;
4299 int i;
4300
4301 /* Get RX stats. */
4302 pkts = mcast = dropped = errors = bytes = 0;
4303 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4304 pkts += rx_ring->rx_packets;
4305 bytes += rx_ring->rx_bytes;
4306 dropped += rx_ring->rx_dropped;
4307 errors += rx_ring->rx_errors;
4308 mcast += rx_ring->rx_multicast;
4309 }
4310 ndev->stats.rx_packets = pkts;
4311 ndev->stats.rx_bytes = bytes;
4312 ndev->stats.rx_dropped = dropped;
4313 ndev->stats.rx_errors = errors;
4314 ndev->stats.multicast = mcast;
4315
4316 /* Get TX stats. */
4317 pkts = errors = bytes = 0;
4318 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4319 pkts += tx_ring->tx_packets;
4320 bytes += tx_ring->tx_bytes;
4321 errors += tx_ring->tx_errors;
4322 }
4323 ndev->stats.tx_packets = pkts;
4324 ndev->stats.tx_bytes = bytes;
4325 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004326 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004327}
4328
stephen hemmingerac409212010-10-21 07:50:54 +00004329static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004330{
Joe Perchesb16fed02010-11-15 11:12:28 +00004331 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004332 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004333 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004334
Ron Mercercc288f52009-02-23 10:42:14 +00004335 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4336 if (status)
4337 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004338 /*
4339 * Set or clear promiscuous mode if a
4340 * transition is taking place.
4341 */
4342 if (ndev->flags & IFF_PROMISC) {
4343 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4344 if (ql_set_routing_reg
4345 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004346 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004347 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004348 } else {
4349 set_bit(QL_PROMISCUOUS, &qdev->flags);
4350 }
4351 }
4352 } else {
4353 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4354 if (ql_set_routing_reg
4355 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004356 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004357 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004358 } else {
4359 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4360 }
4361 }
4362 }
4363
4364 /*
4365 * Set or clear all multicast mode if a
4366 * transition is taking place.
4367 */
4368 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004369 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004370 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4371 if (ql_set_routing_reg
4372 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004373 netif_err(qdev, hw, qdev->ndev,
4374 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004375 } else {
4376 set_bit(QL_ALLMULTI, &qdev->flags);
4377 }
4378 }
4379 } else {
4380 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4381 if (ql_set_routing_reg
4382 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004383 netif_err(qdev, hw, qdev->ndev,
4384 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004385 } else {
4386 clear_bit(QL_ALLMULTI, &qdev->flags);
4387 }
4388 }
4389 }
4390
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004391 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004392 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4393 if (status)
4394 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004395 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004396 netdev_for_each_mc_addr(ha, ndev) {
4397 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004398 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004399 netif_err(qdev, hw, qdev->ndev,
4400 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004401 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004402 goto exit;
4403 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004404 i++;
4405 }
Ron Mercercc288f52009-02-23 10:42:14 +00004406 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004407 if (ql_set_routing_reg
4408 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004409 netif_err(qdev, hw, qdev->ndev,
4410 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004411 } else {
4412 set_bit(QL_ALLMULTI, &qdev->flags);
4413 }
4414 }
4415exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004416 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004417}
4418
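/*
 * Program a new station address into the CAM and keep a local copy so
 * it can be re-loaded after a reset.  The slot index, qdev->func *
 * MAX_CQ, appears to give each PCI function its own region of the CAM
 * (inferred from the index math here, not from documentation).
 */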
4419static int qlge_set_mac_address(struct net_device *ndev, void *p)
4420{
Joe Perchesb16fed02010-11-15 11:12:28 +00004421 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004422 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004423 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004424
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004425 if (!is_valid_ether_addr(addr->sa_data))
4426 return -EADDRNOTAVAIL;
4427 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004428 /* Update local copy of current mac address. */
4429 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004430
Ron Mercercc288f52009-02-23 10:42:14 +00004431 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4432 if (status)
4433 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004434 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4435 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004436 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004437 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004438 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4439 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004440}
4441
4442static void qlge_tx_timeout(struct net_device *ndev)
4443{
Joe Perchesb16fed02010-11-15 11:12:28 +00004444 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004445 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004446}
4447
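/*
 * Recover from an ASIC error by cycling the adapter down and back up
 * under RTNL.  The QL_ALLMULTI/QL_PROMISCUOUS bits are cleared first
 * so that qlge_set_multicast_list() re-programs the rx mode from
 * scratch once the adapter is back up.
 */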
4448static void ql_asic_reset_work(struct work_struct *work)
4449{
4450 struct ql_adapter *qdev =
4451 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004452 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004453 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004454 status = ql_adapter_down(qdev);
4455 if (status)
4456 goto error;
4457
4458 status = ql_adapter_up(qdev);
4459 if (status)
4460 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004461
4462 /* Restore rx mode. */
4463 clear_bit(QL_ALLMULTI, &qdev->flags);
4464 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4465 qlge_set_multicast_list(qdev->ndev);
4466
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004467 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004468 return;
4469error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004470 netif_alert(qdev, ifup, qdev->ndev,
4471 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004472
Ron Mercerdb988122009-03-09 10:59:17 +00004473 set_bit(QL_ADAPTER_UP, &qdev->flags);
4474 dev_close(qdev->ndev);
4475 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004476}
4477
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004478static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004479 .get_flash = ql_get_8012_flash_params,
4480 .port_initialize = ql_8012_port_initialize,
4481};
4482
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004483static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004484 .get_flash = ql_get_8000_flash_params,
4485 .port_initialize = ql_8000_port_initialize,
4486};
4487
Ron Mercere4552f52009-06-09 05:39:32 +00004488/* Find the pcie function number for the other NIC
4489 * on this chip. Since both NIC functions share a
4490 * common firmware, we have the lowest enabled function
4491 * do any common work. Examples would be resetting
4492 * after a fatal firmware error, or doing a firmware
4493 * coredump.
4494 */
4495static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004496{
Ron Mercere4552f52009-06-09 05:39:32 +00004497 int status = 0;
4498 u32 temp;
4499 u32 nic_func1, nic_func2;
4500
4501 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4502 &temp);
4503 if (status)
4504 return status;
4505
4506 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4507 MPI_TEST_NIC_FUNC_MASK);
4508 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4509 MPI_TEST_NIC_FUNC_MASK);
4510
4511 if (qdev->func == nic_func1)
4512 qdev->alt_func = nic_func2;
4513 else if (qdev->func == nic_func2)
4514 qdev->alt_func = nic_func1;
4515 else
4516 status = -EIO;
4517
4518 return status;
4519}
4520
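/*
 * Example (illustrative): if MPI_TEST_FUNC_PORT_CFG reports NIC
 * functions 1 and 2 and we probed as function 2, ql_get_alt_pcie_func()
 * sets alt_func = 1 and, being the higher of the pair, this function
 * is assigned port 1 below.
 */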
4521static int ql_get_board_info(struct ql_adapter *qdev)
4522{
4523 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004524 qdev->func =
4525 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004526 if (qdev->func > 3)
4527 return -EIO;
4528
4529 status = ql_get_alt_pcie_func(qdev);
4530 if (status)
4531 return status;
4532
4533 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4534 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004535 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4536 qdev->port_link_up = STS_PL1;
4537 qdev->port_init = STS_PI1;
4538 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4539 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4540 } else {
4541 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4542 qdev->port_link_up = STS_PL0;
4543 qdev->port_init = STS_PI0;
4544 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4545 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4546 }
4547 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004548 qdev->device_id = qdev->pdev->device;
4549 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4550 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004551 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4552 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004553 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004554}
4555
4556static void ql_release_all(struct pci_dev *pdev)
4557{
4558 struct net_device *ndev = pci_get_drvdata(pdev);
4559 struct ql_adapter *qdev = netdev_priv(ndev);
4560
4561 if (qdev->workqueue) {
4562 destroy_workqueue(qdev->workqueue);
4563 qdev->workqueue = NULL;
4564 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004565
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004566 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004567 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004568 if (qdev->doorbell_area)
4569 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004570 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004571 pci_release_regions(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004572}
4573
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004574static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4575 int cards_found)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004576{
4577 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004578 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004579
Ron Mercere3324712009-07-02 06:06:13 +00004580 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004581 err = pci_enable_device(pdev);
4582 if (err) {
4583 dev_err(&pdev->dev, "PCI device enable failed.\n");
4584 return err;
4585 }
4586
Ron Mercerebd6e772009-09-29 08:39:25 +00004587 qdev->ndev = ndev;
4588 qdev->pdev = pdev;
4589 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004590
Ron Mercerbc9167f2009-10-10 09:35:04 +00004591 /* Set PCIe read request size */
4592 err = pcie_set_readrq(pdev, 4096);
4593 if (err) {
4594 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004595 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004596 }
4597
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004598 err = pci_request_regions(pdev, DRV_NAME);
4599 if (err) {
4600 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004601 goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004602 }
4603
4604 pci_set_master(pdev);
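	/*
	 * Prefer a 64-bit DMA mask and fall back to 32-bit.  The
	 * QL_DMA64 flag recorded here is what later enables
	 * NETIF_F_HIGHDMA in qlge_probe().
	 */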
Yang Hongyang6a355282009-04-06 19:01:13 -07004605 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004606 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004607 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004608 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004609 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004610 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004611 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004612 }
4613
4614 if (err) {
4615 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004616 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004617 }
4618
Ron Mercer73475332009-11-06 07:44:58 +00004619 /* Set PCIe reset type for EEH to fundamental. */
4620 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004621 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004622 qdev->reg_base =
4623 ioremap_nocache(pci_resource_start(pdev, 1),
4624 pci_resource_len(pdev, 1));
4625 if (!qdev->reg_base) {
4626 dev_err(&pdev->dev, "Register mapping failed.\n");
4627 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004628 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004629 }
4630
4631 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4632 qdev->doorbell_area =
4633 ioremap_nocache(pci_resource_start(pdev, 3),
4634 pci_resource_len(pdev, 3));
4635 if (!qdev->doorbell_area) {
4636 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4637 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004638 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004639 }
4640
Ron Mercere4552f52009-06-09 05:39:32 +00004641 err = ql_get_board_info(qdev);
4642 if (err) {
4643 dev_err(&pdev->dev, "Register access failed.\n");
4644 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004645 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004646 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004647 qdev->msg_enable = netif_msg_init(debug, default_msg);
4648 spin_lock_init(&qdev->hw_lock);
4649 spin_lock_init(&qdev->stats_lock);
4650
Ron Mercer8aae2602010-01-15 13:31:28 +00004651 if (qlge_mpi_coredump) {
4652 qdev->mpi_coredump =
4653 vmalloc(sizeof(struct ql_mpi_coredump));
4654 if (qdev->mpi_coredump == NULL) {
Ron Mercer8aae2602010-01-15 13:31:28 +00004655 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004656 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004657 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004658 if (qlge_force_coredump)
4659 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004660 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004661 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004662 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004663 if (err) {
4664 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004665 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004666 }
4667
Ron Mercer801e9092010-02-17 06:41:22 +00004668 /* Keep local copy of current mac address. */
4669 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004670
4671 /* Set up the default ring sizes. */
4672 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4673 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4674
4675 /* Set up the coalescing parameters. */
4676 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4677 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4678 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4679 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4680
4681 /*
4682 * Set up the operating parameters.
4683 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004684 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4685 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4686 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4687 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004688 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004689 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004690 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004691 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004692 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004693
4694 if (!cards_found) {
4695 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4696 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4697 DRV_NAME, DRV_VERSION);
4698 }
4699 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004700err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004701 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004702err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004703 pci_disable_device(pdev);
4704 return err;
4705}
4706
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004707static const struct net_device_ops qlge_netdev_ops = {
4708 .ndo_open = qlge_open,
4709 .ndo_stop = qlge_close,
4710 .ndo_start_xmit = qlge_send,
4711 .ndo_change_mtu = qlge_change_mtu,
4712 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004713 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004714 .ndo_set_mac_address = qlge_set_mac_address,
4715 .ndo_validate_addr = eth_validate_addr,
4716 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004717 .ndo_fix_features = qlge_fix_features,
4718 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004719 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4720 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004721};
4722
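/*
 * Periodic bus-health check.  The ql_read32(STS) below is the point of
 * this timer: an MMIO read on a dead bus is what lets EEH notice the
 * failure (inferred from the "trigger EEH" comment at the call site in
 * qlge_probe()).  Once pci_channel_offline() reports the failure we
 * return without re-arming, which stops the timer.
 */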
Ron Mercer15c052f2010-02-04 13:32:46 -08004723static void ql_timer(unsigned long data)
4724{
4725 struct ql_adapter *qdev = (struct ql_adapter *)data;
4726 u32 var = 0;
4727
4728 var = ql_read32(qdev, STS);
4729 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004730 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004731 return;
4732 }
4733
Breno Leitao72046d82010-07-01 03:00:17 +00004734 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004735}
4736
Bill Pemberton5d8e8722012-12-03 09:23:27 -05004737static int qlge_probe(struct pci_dev *pdev,
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004738 const struct pci_device_id *pci_entry)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004739{
4740 struct net_device *ndev = NULL;
4741 struct ql_adapter *qdev = NULL;
4742 static int cards_found;
4743 int err = 0;
4744
Ron Mercer1e213302009-03-09 10:59:21 +00004745 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
Yuval Mintz9eb87382012-07-01 03:18:53 +00004746 min(MAX_CPUS, netif_get_num_default_rss_queues()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004747 if (!ndev)
4748 return -ENOMEM;
4749
4750 err = ql_init_device(pdev, ndev, cards_found);
4751 if (err < 0) {
4752 free_netdev(ndev);
4753 return err;
4754 }
4755
4756 qdev = netdev_priv(ndev);
4757 SET_NETDEV_DEV(ndev, &pdev->dev);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04004758 ndev->hw_features = NETIF_F_SG |
4759 NETIF_F_IP_CSUM |
4760 NETIF_F_TSO |
4761 NETIF_F_TSO_ECN |
4762 NETIF_F_HW_VLAN_CTAG_TX |
4763 NETIF_F_HW_VLAN_CTAG_RX |
4764 NETIF_F_HW_VLAN_CTAG_FILTER |
4765 NETIF_F_RXCSUM;
4766 ndev->features = ndev->hw_features;
brenohl@br.ibm.com1a0150a92012-07-27 08:54:52 +00004767 ndev->vlan_features = ndev->hw_features;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004768
4769 if (test_bit(QL_DMA64, &qdev->flags))
4770 ndev->features |= NETIF_F_HIGHDMA;
4771
4772 /*
4773 * Set up net_device structure.
4774 */
4775 ndev->tx_queue_len = qdev->tx_ring_size;
4776 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004777
4778 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004779 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004780 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004781
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004782 err = register_netdev(ndev);
4783 if (err) {
4784 dev_err(&pdev->dev, "net device registration failed.\n");
4785 ql_release_all(pdev);
4786 pci_disable_device(pdev);
Wei Yongjun4d2593c2013-05-22 23:09:50 +00004787 free_netdev(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004788 return err;
4789 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004790 /* Start up the timer to trigger EEH if
4791 * the bus goes dead.
4792 */
4793 init_timer_deferrable(&qdev->timer);
4794 qdev->timer.data = (unsigned long)qdev;
4795 qdev->timer.function = ql_timer;
4796 qdev->timer.expires = jiffies + (5*HZ);
4797 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004798 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004799 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004800 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004801 cards_found++;
4802 return 0;
4803}
4804
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004805netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4806{
4807 return qlge_send(skb, ndev);
4808}
4809
4810int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4811{
4812 return ql_clean_inbound_rx_ring(rx_ring, budget);
4813}
4814
Bill Pemberton5d8e8722012-12-03 09:23:27 -05004815static void qlge_remove(struct pci_dev *pdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004816{
4817 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004818 struct ql_adapter *qdev = netdev_priv(ndev);
4819 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004820 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004821 unregister_netdev(ndev);
4822 ql_release_all(pdev);
4823 pci_disable_device(pdev);
4824 free_netdev(ndev);
4825}
4826
Ron Mercer6d190c62009-10-28 08:39:20 +00004827/* Clean up resources without touching hardware. */
4828static void ql_eeh_close(struct net_device *ndev)
4829{
4830 int i;
4831 struct ql_adapter *qdev = netdev_priv(ndev);
4832
4833 if (netif_carrier_ok(ndev)) {
4834 netif_carrier_off(ndev);
4835 netif_stop_queue(ndev);
4836 }
4837
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004838 /* Disable the timer. */
4839 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004840 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004841
4842 for (i = 0; i < qdev->rss_ring_count; i++)
4843 netif_napi_del(&qdev->rx_ring[i].napi);
4844
4845 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4846 ql_tx_ring_clean(qdev);
4847 ql_free_rx_buffers(qdev);
4848 ql_release_adapter_resources(qdev);
4849}
4850
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004851/*
4852 * This callback is called by the PCI subsystem whenever
4853 * a PCI bus error is detected.
4854 */
4855static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4856 enum pci_channel_state state)
4857{
4858 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004859 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004860
Ron Mercer6d190c62009-10-28 08:39:20 +00004861 switch (state) {
4862 case pci_channel_io_normal:
4863 return PCI_ERS_RESULT_CAN_RECOVER;
4864 case pci_channel_io_frozen:
4865 netif_device_detach(ndev);
4866 if (netif_running(ndev))
4867 ql_eeh_close(ndev);
4868 pci_disable_device(pdev);
4869 return PCI_ERS_RESULT_NEED_RESET;
4870 case pci_channel_io_perm_failure:
4871 dev_err(&pdev->dev,
4872 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004873 ql_eeh_close(ndev);
4874 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004875 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004876 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004877
4878 /* Request a slot reset. */
4879 return PCI_ERS_RESULT_NEED_RESET;
4880}
4881
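/*
 * Typical recovery sequence (informational sketch): the callback above
 * returns PCI_ERS_RESULT_NEED_RESET, the PCI core resets the slot and
 * calls qlge_io_slot_reset() to re-enable and reset the chip, and on
 * success qlge_io_resume() reopens the interface and re-arms the
 * watchdog timer.
 */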
4882/*
4883 * This callback is called after the PCI bus has been reset.
4884 * Basically, this tries to restart the card from scratch.
4885 * This is a shortened version of the device probe/discovery code,
4886 * it resembles the first half of the probe routine.
4887 */
4888static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4889{
4890 struct net_device *ndev = pci_get_drvdata(pdev);
4891 struct ql_adapter *qdev = netdev_priv(ndev);
4892
Ron Mercer6d190c62009-10-28 08:39:20 +00004893 pdev->error_state = pci_channel_io_normal;
4894
4895 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004896 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004897 netif_err(qdev, ifup, qdev->ndev,
4898 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004899 return PCI_ERS_RESULT_DISCONNECT;
4900 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004901 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004902
4903 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004904 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004905 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004906 return PCI_ERS_RESULT_DISCONNECT;
4907 }
4908
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004909 return PCI_ERS_RESULT_RECOVERED;
4910}
4911
4912static void qlge_io_resume(struct pci_dev *pdev)
4913{
4914 struct net_device *ndev = pci_get_drvdata(pdev);
4915 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004916 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004917
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004918 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004919 err = qlge_open(ndev);
4920 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004921 netif_err(qdev, ifup, qdev->ndev,
4922 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004923 return;
4924 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004925 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004926 netif_err(qdev, ifup, qdev->ndev,
4927 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004928 }
Breno Leitao72046d82010-07-01 03:00:17 +00004929 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004930 netif_device_attach(ndev);
4931}
4932
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004933static const struct pci_error_handlers qlge_err_handler = {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004934 .error_detected = qlge_io_error_detected,
4935 .slot_reset = qlge_io_slot_reset,
4936 .resume = qlge_io_resume,
4937};
4938
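/*
 * Legacy PCI power management: detach the net device, stop the
 * watchdog timer, bring the adapter down if it was running, arm
 * wake-on-LAN via ql_wol(), then save PCI state and power the
 * function down.
 */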
4939static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4940{
4941 struct net_device *ndev = pci_get_drvdata(pdev);
4942 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004943 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004944
4945 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004946 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004947
4948 if (netif_running(ndev)) {
4949 err = ql_adapter_down(qdev);
4950 if (err)
4951 return err;
4952 }
4953
Ron Mercerbc083ce2009-10-21 11:07:40 +00004954 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004955 err = pci_save_state(pdev);
4956 if (err)
4957 return err;
4958
4959 pci_disable_device(pdev);
4960
4961 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4962
4963 return 0;
4964}
4965
David S. Miller04da2cf2008-09-19 16:14:24 -07004966#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004967static int qlge_resume(struct pci_dev *pdev)
4968{
4969 struct net_device *ndev = pci_get_drvdata(pdev);
4970 struct ql_adapter *qdev = netdev_priv(ndev);
4971 int err;
4972
4973 pci_set_power_state(pdev, PCI_D0);
4974 pci_restore_state(pdev);
4975 err = pci_enable_device(pdev);
4976 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004977 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004978 return err;
4979 }
4980 pci_set_master(pdev);
4981
4982 pci_enable_wake(pdev, PCI_D3hot, 0);
4983 pci_enable_wake(pdev, PCI_D3cold, 0);
4984
4985 if (netif_running(ndev)) {
4986 err = ql_adapter_up(qdev);
4987 if (err)
4988 return err;
4989 }
4990
Breno Leitao72046d82010-07-01 03:00:17 +00004991 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004992 netif_device_attach(ndev);
4993
4994 return 0;
4995}
David S. Miller04da2cf2008-09-19 16:14:24 -07004996#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004997
4998static void qlge_shutdown(struct pci_dev *pdev)
4999{
5000 qlge_suspend(pdev, PMSG_SUSPEND);
5001}
5002
5003static struct pci_driver qlge_driver = {
5004 .name = DRV_NAME,
5005 .id_table = qlge_pci_tbl,
5006 .probe = qlge_probe,
Bill Pemberton5d8e8722012-12-03 09:23:27 -05005007 .remove = qlge_remove,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04005008#ifdef CONFIG_PM
5009 .suspend = qlge_suspend,
5010 .resume = qlge_resume,
5011#endif
5012 .shutdown = qlge_shutdown,
5013 .err_handler = &qlge_err_handler
5014};
5015
Peter Hüwe70a611d2013-05-21 12:58:08 +00005016module_pci_driver(qlge_driver);