/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

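/* Busy-wait for a hardware semaphore.  ql_sem_trylock() above returns
 * zero when the semaphore was acquired, so we poll it up to 30 times
 * with 100 usec between attempts (about 3 ms) before timing out.
 */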
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* don't leak the DMA mapping */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we hardcode
			 * the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

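/* Link transitions set or clear our CAM MAC entry (see
 * ql_set_mac_addr() above) so that a stale address match cannot
 * misroute frames to an inactive port, e.g. after a bonding failover.
 */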
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_IP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
		{
			value = RT_IDX_DST_DFLT_Q | /* dest */
				RT_IDX_TYPE_NICQ | /* type */
				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
				RT_IDX_IDX_SHIFT); /* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

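/* Note on the write convention used throughout this file: for most
 * control registers (see also the CFG and STS writes elsewhere) the
 * upper 16 bits of the written value appear to act as a per-bit write
 * mask, so writing (INTR_EN_EI << 16) alone clears INTR_EN_EI while
 * OR-ing in the low copy of the bit sets it.
 */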
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

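/* Sanity-check the flash image just read: the first four bytes must
 * match the expected signature string ("8012" or "8000"), and the
 * little-endian 16-bit words of the parameter block must sum to zero,
 * i.e. the stored checksum word balances the rest of the block.
 */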
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

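/* Flash is reached through the FLASH_ADDR/FLASH_DATA indirect register
 * pair: wait for ready, write the offset with the read strobe
 * (FLASH_ADDR_R) set, wait for ready again, then pull the word out of
 * FLASH_DATA.
 */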
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

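/* Read this function's parameter block out of flash, validate it, and
 * extract the MAC address: either the manufacturer default or, when
 * data_type1 indicates it, a BOFM-modified address.
 */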
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

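/* For 8000-series devices the heavy lifting of port initialization is
 * believed to live in the MPI firmware; here we only fetch the firmware
 * version and state, then kick the mpi_port_cfg worker to negotiate the
 * TX/RX frame sizes.
 */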
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

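/* Size of the master page from which large receive buffer chunks
 * are carved.
 */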
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

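/* Like ql_get_curr_lbuf(), but also syncs the chunk for CPU access.
 * The master page mapping is only torn down when its last chunk is
 * consumed; earlier chunks stay valid via the page references taken
 * in ql_get_next_chunk().
 */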
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

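/* Fill one lbq descriptor with the next chunk of the current master
 * page, allocating and mapping a fresh page of ql_lbq_block_size()
 * bytes when the previous one is used up.  Every chunk except the
 * last takes an extra page reference so the page survives until all
 * of its chunks have been returned.
 */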
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx =%d .\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

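/* Replenish both buffer queues for this rx ring.  The refill routines
 * above work in batches of 16 descriptors and ring the producer-index
 * doorbell once per refill pass.
 */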
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		/* fall through */
	default:
		break;
	}
}

/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field.
		 * tags[] indexes 16-bit words from the start of the
		 * frame, so tags[6] is the outer EtherType and tags[8]
		 * the inner one.  The frame is in network byte order,
		 * hence the htons() on the constant.
		 */
		if (tags[6] == htons(ETH_P_8021Q) &&
		    tags[8] == htons(ETH_P_8021Q))
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
1491
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);
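	/* __skb_fill_page_desc() only writes the fragment entry itself;
	 * the skb byte counts and nr_frags must be updated by hand below
	 * before the skb is handed to GRO.
	 */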

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length. */
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (length > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
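	/* Copy only the (possibly VLAN-extended) MAC header into the
	 * linear area; the payload stays in the page chunk and is
	 * attached below as a page fragment to avoid a full copy.
	 */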
	memcpy(skb_put(skb, hlen), addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen,
			   length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rx_ring->rx_dropped++;
		return;
	}
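	/* Copy the frame into the fresh skb; the original small-buffer
	 * skb stays attached to its sbq descriptor so it can be reposted
	 * to the hardware.
	 */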
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
				(unsigned int)len);
}

/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}
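	/* Four layouts are handled below: no data at all, data in a
	 * single small buffer, data in a single large buffer (chained
	 * to the header skb or copied into a new one), or data spread
	 * across a chain of large buffers.
	 */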

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr(sbq_desc,
								   mapaddr),
						    dma_unmap_len(sbq_desc,
								  maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr(sbq_desc,
								      mapaddr),
						       dma_unmap_len(sbq_desc,
								     maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc, mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
					      lbq_desc->p.pg_chunk.va,
					      &hlen);
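			/* Pull the (possibly VLAN-extended) MAC header into
			 * the linear area so the stack can parse it.
			 */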
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 * eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
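		/* Chain one lbq page chunk per iteration until the whole
		 * frame length has been consumed.
		 */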
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
				      &hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
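	/* 0xffff is the "no VLAN tag" sentinel understood by all of the
	 * ql_process_mac_rx_* handlers above.
	 */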

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set the asic recovery bit to indicate to the reset
	 * process that we are in a fatal error recovery rather
	 * than a normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first. They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

/**
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status = 0;

	status = ql_adapter_down(qdev);
	if (status) {
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring down the adapter\n");
		return status;
	}

	/* update the features with the recent change */
	ndev->features = features;

	status = ql_adapter_up(qdev);
	if (status) {
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring up the adapter\n");
		return status;
	}
	return status;
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
					   netdev_features_t features)
{
	int err;

	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	/* Update the behavior of vlan accel in the adapter */
	err = qlge_update_hw_vlan_features(ndev, features);
	if (err)
		return err;

	return features;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

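	/* Re-program every VLAN ID that was active before the adapter
	 * was reset.
	 */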
	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
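		/* Seed the TCP checksum with the pseudo-header checksum
		 * (computed with a zero length) so the hardware can
		 * finish the per-segment checksums during LSO.
		 */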
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
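	/* L4 length = total IP datagram length minus the IP header
	 * (ihl is in 32-bit words); used to seed the pseudo-header
	 * checksum below.
	 */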
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (vlan_tx_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
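	/* Ensure the IOCB contents are visible before the producer
	 * index doorbell write below.
	 */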
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

2797static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2798 struct tx_ring *tx_ring)
2799{
2800 tx_ring->wq_base =
2801 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2802 &tx_ring->wq_base_dma);
2803
Joe Perches8e95a202009-12-03 07:58:21 +00002804 if ((tx_ring->wq_base == NULL) ||
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002805 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2806 goto pci_alloc_err;
2807
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002808 tx_ring->q =
2809 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2810 if (tx_ring->q == NULL)
2811 goto err;
2812
2813 return 0;
2814err:
2815 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2816 tx_ring->wq_base, tx_ring->wq_base_dma);
Jitendra Kalsariaf5c44412012-07-10 14:57:36 +00002817 tx_ring->wq_base = NULL;
2818pci_alloc_err:
2819 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002820 return -ENOMEM;
2821}
2822
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
	if (rx_ring->pg_chunk.page) {
		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
		put_page(rx_ring->pg_chunk.page);
		rx_ring->pg_chunk.page = NULL;
	}
}
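
/*
 * Illustrative walk of the loop above (not driver code): with
 * lbq_len = 8, lbq_curr_idx = 5 and lbq_clean_idx = 2, the indices
 * visited are 5, 6, 7, 0, 1 -- every buffer that has not yet been
 * cleaned, wrapping modulo the ring length until curr catches up
 * with clean.
 */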

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->sbq == NULL)
			goto err_mem;

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (rx_ring->lbq == NULL)
			goto err_mem;

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |			/* Load MSI-X vector */
	    FLAGS_LI;			/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
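
/*
 * Illustrative sketch, not driver code: the do/while loops above chop
 * a buffer queue into DB_PAGE_SIZE chunks and record each chunk's DMA
 * address in the shadow area.  The entry count below assumes
 * MAX_DB_PAGES_PER_BQ() rounds up the same way; e.g. a 512-entry lbq
 * is 512 * 8 = 4096 bytes and needs exactly one indirection entry.
 */
static inline int ql_db_pages_example(int q_len)
{
	/* q_len __le64 entries, split into doorbell-page-sized chunks. */
	return DIV_ROUND_UP(q_len * sizeof(__le64), DB_PAGE_SIZE);
}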

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors.  We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
					      qdev->msi_x_entry,
					      qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}
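
/*
 * Worked example of the negotiation above (illustrative): with the
 * legacy pci_enable_msix() semantics used here, a positive return is
 * the number of vectors actually available.  Asking for 8 on a slot
 * that can grant only 5 goes:
 *
 *	pci_enable_msix(..., 8) -> 5	intr_count = 5, loop again
 *	pci_enable_msix(..., 5) -> 0	success, 5 vectors
 *
 * A negative return at any point drops the driver to MSI, then to
 * legacy INTx if MSI also fails.
 */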

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
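
/*
 * Illustrative equivalent of the loop above, not driver code: the
 * vector assignment is just integer division of a TX completion
 * ring's position by tx_rings_per_vector.  With 2 vectors and 8 TX
 * rings, positions 0..3 map to vector 0 and 4..7 to vector 1.
 */
static inline u32 ql_tx_vect_example(struct ql_adapter *qdev, int tx_position)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	/* tx_position is the ring's index among the TX completion
	 * rings, i.e. its rx_ring index minus qdev->rss_ring_count.
	 */
	return tx_position / tx_rings_per_vector;
}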

/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more
 * TX completion rings.  This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask. */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
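
/*
 * Worked example of the mask above (illustrative): with 2 vectors,
 * 2 RSS rings (cq_ids 0,1) and 8 TX completion rings (cq_ids 2..9),
 * tx_rings_per_vector is 4, so:
 *
 *	vector 0: bit 0 | bits 2..5  ->  irq_mask = 0x03d
 *	vector 1: bit 1 | bits 6..9  ->  irq_mask = 0x3c2
 *
 * Each set bit is a completion queue ID serviced by that vector.
 */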

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This is in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services.  In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status = request_irq(pdev->irq, qlge_isr,
					     test_bit(QL_MSI_ENABLED,
						      &qdev->flags)
					     ? 0 : IRQF_SHARED,
					     intr_context->name,
					     &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}
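
/*
 * Worked example of the indirection fill above (illustrative): the
 * mask trick assumes rss_ring_count is a power of two.  With 4 RSS
 * rings, hash_id[] becomes the repeating pattern 0,1,2,3,0,1,2,3,...
 * across all 1024 slots -- (i & 3) == (i % 4) -- spreading RSS hash
 * buckets evenly over the inbound completion queues.
 */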

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register "
			  "for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		value |= NIC_RCV_CFG_RV;
		mask |= (NIC_RCV_CFG_RV << 16);
	}
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use the port/PCI function on which
	 * the packet arrived, in addition to the usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enabled on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* If the recovery bit is set, skip the mailbox command and
	 * clear the bit; otherwise we are in the normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs.  We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		} else
			netif_info(qdev, drv, qdev->ndev,
				   "Enabled magic packet successfully on %s.\n",
				   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from a common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

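	/* Worked example of the counts above (illustrative): on an
	 * 8-CPU box that is only granted 4 MSI-X vectors,
	 * tx_ring_count = 8, rss_ring_count = 4 and rx_ring_count = 12
	 * (4 inbound RSS rings plus 8 outbound-completion rings), so
	 * each vector services 1 RSS ring and 2 TX completion rings.
	 */
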
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004134 for (i = 0; i < qdev->tx_ring_count; i++) {
4135 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004136 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004137 tx_ring->qdev = qdev;
4138 tx_ring->wq_id = i;
4139 tx_ring->wq_len = qdev->tx_ring_size;
4140 tx_ring->wq_size =
4141 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4142
4143 /*
4144 * The completion queue ID for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004145 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004146 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004147 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004148 }
4149
4150 for (i = 0; i < qdev->rx_ring_count; i++) {
4151 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004152 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004153 rx_ring->qdev = qdev;
4154 rx_ring->cq_id = i;
4155 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004156 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004157 /*
4158 * Inbound (RSS) queues.
4159 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004160 rx_ring->cq_len = qdev->rx_ring_size;
4161 rx_ring->cq_size =
4162 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4163 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4164 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004165 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004166 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004167 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4168 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004169 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004170 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004171 rx_ring->type = RX_Q;
4172 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004173 /*
4174 * Outbound queue handles outbound completions only.
4175 */
4176 /* outbound cq is same size as tx_ring it services. */
4177 rx_ring->cq_len = qdev->tx_ring_size;
4178 rx_ring->cq_size =
4179 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4180 rx_ring->lbq_len = 0;
4181 rx_ring->lbq_size = 0;
4182 rx_ring->lbq_buf_size = 0;
4183 rx_ring->sbq_len = 0;
4184 rx_ring->sbq_size = 0;
4185 rx_ring->sbq_buf_size = 0;
4186 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004187 }
4188 }
4189 return 0;
4190}
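/* Illustrative example (editorial addition, not part of the driver):
 * with cpu_cnt = 4 and a full MSI-X grant, ql_configure_rings() above
 * yields rss_ring_count = 4, tx_ring_count = 4 and rx_ring_count = 8.
 * rx_ring[0..3] are inbound RSS completion queues (cq_id 0..3), while
 * rx_ring[4..7] service the four tx rings (cq_id 4..7), which is why
 * tx_ring[i].cq_id is set to rss_ring_count + i.
 */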
4191
4192static int qlge_open(struct net_device *ndev)
4193{
4194 int err = 0;
4195 struct ql_adapter *qdev = netdev_priv(ndev);
4196
Ron Mercer74e12432009-11-11 12:54:04 +00004197 err = ql_adapter_reset(qdev);
4198 if (err)
4199 return err;
4200
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004201 err = ql_configure_rings(qdev);
4202 if (err)
4203 return err;
4204
4205 err = ql_get_adapter_resources(qdev);
4206 if (err)
4207 goto error_up;
4208
4209 err = ql_adapter_up(qdev);
4210 if (err)
4211 goto error_up;
4212
4213 return err;
4214
4215error_up:
4216 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004217 return err;
4218}
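/* Editorial note: qlge_open() resets the chip before configuring the
 * rings, so the MTU-dependent geometry computed in ql_configure_rings()
 * (lbq_buf_len, lbq_buf_order) is always applied to a quiesced device.
 */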
4219
Ron Mercer7c734352009-10-19 03:32:19 +00004220static int ql_change_rx_buffers(struct ql_adapter *qdev)
4221{
4222 struct rx_ring *rx_ring;
4223 int i, status;
4224 u32 lbq_buf_len;
4225
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004226 /* Wait for an outstanding reset to complete. */
Ron Mercer7c734352009-10-19 03:32:19 +00004227 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4228 int i = 3;
4229 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004230 netif_err(qdev, ifup, qdev->ndev,
4231 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004232 ssleep(1);
4233 }
4234
4235 if (!i) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004236 netif_err(qdev, ifup, qdev->ndev,
4237 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004238 return -ETIMEDOUT;
4239 }
4240 }
4241
4242 status = ql_adapter_down(qdev);
4243 if (status)
4244 goto error;
4245
4246 /* Get the new rx buffer size. */
4247 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4248 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4249 qdev->lbq_buf_order = get_order(lbq_buf_len);
4250
4251 for (i = 0; i < qdev->rss_ring_count; i++) {
4252 rx_ring = &qdev->rx_ring[i];
4253 /* Set the new size. */
4254 rx_ring->lbq_buf_size = lbq_buf_len;
4255 }
4256
4257 status = ql_adapter_up(qdev);
4258 if (status)
4259 goto error;
4260
4261 return status;
4262error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004263 netif_alert(qdev, ifup, qdev->ndev,
4264 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004265 set_bit(QL_ADAPTER_UP, &qdev->flags);
4266 dev_close(qdev->ndev);
4267 return status;
4268}
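/* Worked example (editorial; the buffer-size constants live in qlge.h
 * and the values below are assumptions): at MTU 9000, lbq_buf_len is
 * LARGE_BUFFER_MAX_SIZE. If that is 8192 on a 4K-page system, then
 * get_order(8192) == 1, i.e. large rx buffers come from order-1
 * (two-page) allocations.
 */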
4269
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004270static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4271{
4272 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004273 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004274
4275 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004276 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004277 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004278 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004279 } else
4280 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004281
4282 queue_delayed_work(qdev->workqueue,
4283 &qdev->mpi_port_cfg_work, 3*HZ);
4284
Breno Leitao746079d2010-02-04 10:11:19 +00004285 ndev->mtu = new_mtu;
4286
Ron Mercer7c734352009-10-19 03:32:19 +00004287 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004288 return 0;
4289 }
4290
Ron Mercer7c734352009-10-19 03:32:19 +00004291 status = ql_change_rx_buffers(qdev);
4292 if (status) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004293 netif_err(qdev, ifup, qdev->ndev,
4294 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004295 }
4296
4297 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004298}
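/* Editorial note: only the 1500 <-> 9000 transitions the hardware is
 * provisioned for are accepted. The net_device MTU changes right away;
 * the rx buffer rework is skipped when the interface is down, because
 * ql_configure_rings() will size the buffers from the new MTU on the
 * next qlge_open().
 */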
4299
4300static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4302{
Ron Mercer885ee392009-11-03 13:49:31 +00004303 struct ql_adapter *qdev = netdev_priv(ndev);
4304 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4305 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4306 unsigned long pkts, mcast, dropped, errors, bytes;
4307 int i;
4308
4309 /* Get RX stats. */
4310 pkts = mcast = dropped = errors = bytes = 0;
4311 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4312 pkts += rx_ring->rx_packets;
4313 bytes += rx_ring->rx_bytes;
4314 dropped += rx_ring->rx_dropped;
4315 errors += rx_ring->rx_errors;
4316 mcast += rx_ring->rx_multicast;
4317 }
4318 ndev->stats.rx_packets = pkts;
4319 ndev->stats.rx_bytes = bytes;
4320 ndev->stats.rx_dropped = dropped;
4321 ndev->stats.rx_errors = errors;
4322 ndev->stats.multicast = mcast;
4323
4324 /* Get TX stats. */
4325 pkts = errors = bytes = 0;
4326 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4327 pkts += tx_ring->tx_packets;
4328 bytes += tx_ring->tx_bytes;
4329 errors += tx_ring->tx_errors;
4330 }
4331 ndev->stats.tx_packets = pkts;
4332 ndev->stats.tx_bytes = bytes;
4333 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004334 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004335}
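/* Editorial note: the counters above are summed per ring without
 * locking, so the snapshot is only loosely consistent while traffic
 * is flowing -- acceptable for the ndo_get_stats contract.
 */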
4336
stephen hemmingerac409212010-10-21 07:50:54 +00004337static void qlge_set_multicast_list(struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004338{
Joe Perchesb16fed02010-11-15 11:12:28 +00004339 struct ql_adapter *qdev = netdev_priv(ndev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004340 struct netdev_hw_addr *ha;
Ron Mercercc288f52009-02-23 10:42:14 +00004341 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004342
Ron Mercercc288f52009-02-23 10:42:14 +00004343 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4344 if (status)
4345 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004346 /*
4347 * Set or clear promiscuous mode if a
4348 * transition is taking place.
4349 */
4350 if (ndev->flags & IFF_PROMISC) {
4351 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4352 if (ql_set_routing_reg
4353 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004354 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004355 "Failed to set promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004356 } else {
4357 set_bit(QL_PROMISCUOUS, &qdev->flags);
4358 }
4359 }
4360 } else {
4361 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4362 if (ql_set_routing_reg
4363 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004364 netif_err(qdev, hw, qdev->ndev,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004365 "Failed to clear promiscuous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004366 } else {
4367 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4368 }
4369 }
4370 }
4371
4372 /*
4373 * Set or clear all multicast mode if a
4374 * transition is taking place.
4375 */
4376 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004377 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004378 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4379 if (ql_set_routing_reg
4380 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004381 netif_err(qdev, hw, qdev->ndev,
4382 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004383 } else {
4384 set_bit(QL_ALLMULTI, &qdev->flags);
4385 }
4386 }
4387 } else {
4388 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4389 if (ql_set_routing_reg
4390 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004391 netif_err(qdev, hw, qdev->ndev,
4392 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004393 } else {
4394 clear_bit(QL_ALLMULTI, &qdev->flags);
4395 }
4396 }
4397 }
4398
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004399 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004400 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4401 if (status)
4402 goto exit;
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004403 i = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004404 netdev_for_each_mc_addr(ha, ndev) {
4405 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004406 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004407 netif_err(qdev, hw, qdev->ndev,
4408					   "Failed to load multicast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004409 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004410 goto exit;
4411 }
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00004412 i++;
4413 }
Ron Mercercc288f52009-02-23 10:42:14 +00004414 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004415 if (ql_set_routing_reg
4416 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004417 netif_err(qdev, hw, qdev->ndev,
4418 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004419 } else {
4420 set_bit(QL_ALLMULTI, &qdev->flags);
4421 }
4422 }
4423exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004424 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004425}
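/* Editorial note: QL_PROMISCUOUS and QL_ALLMULTI cache what was last
 * written to the routing registers, so the hardware is only touched on
 * an actual transition of ndev->flags. ql_asic_reset_work() clears both
 * bits before calling this function to force a rewrite after a reset.
 */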
4426
4427static int qlge_set_mac_address(struct net_device *ndev, void *p)
4428{
Joe Perchesb16fed02010-11-15 11:12:28 +00004429 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004430 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004431 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004432
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004433 if (!is_valid_ether_addr(addr->sa_data))
4434 return -EADDRNOTAVAIL;
4435 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
Ron Mercer801e9092010-02-17 06:41:22 +00004436 /* Update local copy of current mac address. */
4437 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004438
Ron Mercercc288f52009-02-23 10:42:14 +00004439 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4440 if (status)
4441 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004442 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4443 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004444 if (status)
Joe Perchesae9540f72010-02-09 11:49:52 +00004445 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004446 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4447 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004448}
4449
4450static void qlge_tx_timeout(struct net_device *ndev)
4451{
Joe Perchesb16fed02010-11-15 11:12:28 +00004452 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004453 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004454}
4455
4456static void ql_asic_reset_work(struct work_struct *work)
4457{
4458 struct ql_adapter *qdev =
4459 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004460 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004461 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004462 status = ql_adapter_down(qdev);
4463 if (status)
4464 goto error;
4465
4466 status = ql_adapter_up(qdev);
4467 if (status)
4468 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004469
4470 /* Restore rx mode. */
4471 clear_bit(QL_ALLMULTI, &qdev->flags);
4472 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4473 qlge_set_multicast_list(qdev->ndev);
4474
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004475 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004476 return;
4477error:
Joe Perchesae9540f72010-02-09 11:49:52 +00004478 netif_alert(qdev, ifup, qdev->ndev,
4479 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004480
Ron Mercerdb988122009-03-09 10:59:17 +00004481 set_bit(QL_ADAPTER_UP, &qdev->flags);
4482 dev_close(qdev->ndev);
4483 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004484}
4485
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004486static const struct nic_operations qla8012_nic_ops = {
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004487 .get_flash = ql_get_8012_flash_params,
4488 .port_initialize = ql_8012_port_initialize,
4489};
4490
stephen hemmingeref9c7ab2011-04-14 05:51:52 +00004491static const struct nic_operations qla8000_nic_ops = {
Ron Mercercdca8d02009-03-02 08:07:31 +00004492 .get_flash = ql_get_8000_flash_params,
4493 .port_initialize = ql_8000_port_initialize,
4494};
4495
Ron Mercere4552f52009-06-09 05:39:32 +00004496/* Find the pcie function number for the other NIC
4497 * on this chip. Since both NIC functions share a
4498 * common firmware we have the lowest enabled function
4499 * do any common work. Examples would be resetting
4500 * after a fatal firmware error, or doing a firmware
4501 * coredump.
4502 */
4503static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004504{
Ron Mercere4552f52009-06-09 05:39:32 +00004505 int status = 0;
4506 u32 temp;
4507 u32 nic_func1, nic_func2;
4508
4509 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4510 &temp);
4511 if (status)
4512 return status;
4513
4514 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4515 MPI_TEST_NIC_FUNC_MASK);
4516 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4517 MPI_TEST_NIC_FUNC_MASK);
4518
4519 if (qdev->func == nic_func1)
4520 qdev->alt_func = nic_func2;
4521 else if (qdev->func == nic_func2)
4522 qdev->alt_func = nic_func1;
4523 else
4524 status = -EIO;
4525
4526 return status;
4527}
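/* Worked example (editorial): if the MPI test register reports
 * nic_func1 = 0 and nic_func2 = 2, a qdev running on function 0 gets
 * alt_func = 2, one on function 2 gets alt_func = 0, and any other
 * function number fails with -EIO.
 */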
4528
4529static int ql_get_board_info(struct ql_adapter *qdev)
4530{
4531 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004532 qdev->func =
4533 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004534 if (qdev->func > 3)
4535 return -EIO;
4536
4537 status = ql_get_alt_pcie_func(qdev);
4538 if (status)
4539 return status;
4540
4541 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4542 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004543 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4544 qdev->port_link_up = STS_PL1;
4545 qdev->port_init = STS_PI1;
4546 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4547 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4548 } else {
4549 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4550 qdev->port_link_up = STS_PL0;
4551 qdev->port_init = STS_PI0;
4552 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4553 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4554 }
4555 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004556 qdev->device_id = qdev->pdev->device;
4557 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4558 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004559 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4560 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004561 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004562}
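/* Editorial note: the lower-numbered of the two NIC functions becomes
 * port 0 and uses the XGMAC0 semaphore and FUNC0 mailboxes; the higher
 * one becomes port 1 with the XGMAC1/FUNC2 resources, mirroring the
 * assignments in the if/else above.
 */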
4563
4564static void ql_release_all(struct pci_dev *pdev)
4565{
4566 struct net_device *ndev = pci_get_drvdata(pdev);
4567 struct ql_adapter *qdev = netdev_priv(ndev);
4568
4569 if (qdev->workqueue) {
4570 destroy_workqueue(qdev->workqueue);
4571 qdev->workqueue = NULL;
4572 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004573
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004574 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004575 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004576 if (qdev->doorbell_area)
4577 iounmap(qdev->doorbell_area);
Ron Mercer8aae2602010-01-15 13:31:28 +00004578 vfree(qdev->mpi_coredump);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004579 pci_release_regions(pdev);
4580 pci_set_drvdata(pdev, NULL);
4581}
4582
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004583static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4584 int cards_found)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004585{
4586 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004587 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004588
Ron Mercere3324712009-07-02 06:06:13 +00004589 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004590 err = pci_enable_device(pdev);
4591 if (err) {
4592 dev_err(&pdev->dev, "PCI device enable failed.\n");
4593 return err;
4594 }
4595
Ron Mercerebd6e772009-09-29 08:39:25 +00004596 qdev->ndev = ndev;
4597 qdev->pdev = pdev;
4598 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004599
Ron Mercerbc9167f2009-10-10 09:35:04 +00004600 /* Set PCIe read request size */
4601 err = pcie_set_readrq(pdev, 4096);
4602 if (err) {
4603 dev_err(&pdev->dev, "Set readrq failed.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004604 goto err_out1;
Ron Mercerbc9167f2009-10-10 09:35:04 +00004605 }
4606
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004607 err = pci_request_regions(pdev, DRV_NAME);
4608 if (err) {
4609 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004610		goto err_out1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004611 }
4612
4613 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004614 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004615 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004616 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004617 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004618 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004619 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004620 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004621 }
4622
4623 if (err) {
4624 dev_err(&pdev->dev, "No usable DMA configuration.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004625 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004626 }
4627
Ron Mercer73475332009-11-06 07:44:58 +00004628 /* Set PCIe reset type for EEH to fundamental. */
4629 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004630 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004631 qdev->reg_base =
4632 ioremap_nocache(pci_resource_start(pdev, 1),
4633 pci_resource_len(pdev, 1));
4634 if (!qdev->reg_base) {
4635 dev_err(&pdev->dev, "Register mapping failed.\n");
4636 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004637 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004638 }
4639
4640 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4641 qdev->doorbell_area =
4642 ioremap_nocache(pci_resource_start(pdev, 3),
4643 pci_resource_len(pdev, 3));
4644 if (!qdev->doorbell_area) {
4645 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4646 err = -ENOMEM;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004647 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004648 }
4649
Ron Mercere4552f52009-06-09 05:39:32 +00004650 err = ql_get_board_info(qdev);
4651 if (err) {
4652 dev_err(&pdev->dev, "Register access failed.\n");
4653 err = -EIO;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004654 goto err_out2;
Ron Mercere4552f52009-06-09 05:39:32 +00004655 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004656 qdev->msg_enable = netif_msg_init(debug, default_msg);
4657 spin_lock_init(&qdev->hw_lock);
4658 spin_lock_init(&qdev->stats_lock);
4659
Ron Mercer8aae2602010-01-15 13:31:28 +00004660 if (qlge_mpi_coredump) {
4661 qdev->mpi_coredump =
4662 vmalloc(sizeof(struct ql_mpi_coredump));
4663 if (qdev->mpi_coredump == NULL) {
Ron Mercer8aae2602010-01-15 13:31:28 +00004664 err = -ENOMEM;
Stephen Rothwellce96bc82010-01-28 06:13:13 -08004665 goto err_out2;
Ron Mercer8aae2602010-01-15 13:31:28 +00004666 }
Ron Mercerd5c1da52010-01-15 13:31:34 +00004667 if (qlge_force_coredump)
4668 set_bit(QL_FRC_COREDUMP, &qdev->flags);
Ron Mercer8aae2602010-01-15 13:31:28 +00004669 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004670 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004671 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004672 if (err) {
4673 dev_err(&pdev->dev, "Invalid FLASH.\n");
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004674 goto err_out2;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004675 }
4676
Ron Mercer801e9092010-02-17 06:41:22 +00004677 /* Keep local copy of current mac address. */
4678 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004679
4680 /* Set up the default ring sizes. */
4681 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4682 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4683
4684 /* Set up the coalescing parameters. */
4685 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4686 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4687 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4688 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4689
4690 /*
4691 * Set up the operating parameters.
4692 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004693 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4694 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4695 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4696 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004697 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004698 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00004699 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00004700 init_completion(&qdev->ide_completion);
Ron Mercer4d7b6b52010-12-11 11:06:50 +00004701 mutex_init(&qdev->mpi_mutex);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004702
4703 if (!cards_found) {
4704 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4705 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4706 DRV_NAME, DRV_VERSION);
4707 }
4708 return 0;
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004709err_out2:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004710 ql_release_all(pdev);
Breno Leitao4f9a91c2010-01-25 15:46:58 -08004711err_out1:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004712 pci_disable_device(pdev);
4713 return err;
4714}
4715
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004716static const struct net_device_ops qlge_netdev_ops = {
4717 .ndo_open = qlge_open,
4718 .ndo_stop = qlge_close,
4719 .ndo_start_xmit = qlge_send,
4720 .ndo_change_mtu = qlge_change_mtu,
4721 .ndo_get_stats = qlge_get_stats,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00004722 .ndo_set_rx_mode = qlge_set_multicast_list,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004723 .ndo_set_mac_address = qlge_set_mac_address,
4724 .ndo_validate_addr = eth_validate_addr,
4725 .ndo_tx_timeout = qlge_tx_timeout,
Jiri Pirko18c49b92011-07-21 03:24:11 +00004726 .ndo_fix_features = qlge_fix_features,
4727 .ndo_set_features = qlge_set_features,
Ron Mercer01e6b952009-10-30 12:13:34 +00004728 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4729 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004730};
4731
Ron Mercer15c052f2010-02-04 13:32:46 -08004732static void ql_timer(unsigned long data)
4733{
4734 struct ql_adapter *qdev = (struct ql_adapter *)data;
4735 u32 var = 0;
4736
4737 var = ql_read32(qdev, STS);
4738 if (pci_channel_offline(qdev->pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004739 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
Ron Mercer15c052f2010-02-04 13:32:46 -08004740 return;
4741 }
4742
Breno Leitao72046d82010-07-01 03:00:17 +00004743 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercer15c052f2010-02-04 13:32:46 -08004744}
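/* Editorial note: ql_timer() re-arms itself every 5 seconds. The
 * periodic ql_read32(qdev, STS) is what actually exercises the bus; on
 * EEH-capable platforms a read from a dead channel trips error
 * detection, pci_channel_offline() then returns true, and the timer
 * stops re-arming.
 */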
4745
Bill Pemberton5d8e8722012-12-03 09:23:27 -05004746static int qlge_probe(struct pci_dev *pdev,
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004747 const struct pci_device_id *pci_entry)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004748{
4749 struct net_device *ndev = NULL;
4750 struct ql_adapter *qdev = NULL;
4751 static int cards_found = 0;
4752 int err = 0;
4753
Ron Mercer1e213302009-03-09 10:59:21 +00004754 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
Yuval Mintz9eb87382012-07-01 03:18:53 +00004755 min(MAX_CPUS, netif_get_num_default_rss_queues()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004756 if (!ndev)
4757 return -ENOMEM;
4758
4759 err = ql_init_device(pdev, ndev, cards_found);
4760 if (err < 0) {
4761 free_netdev(ndev);
4762 return err;
4763 }
4764
4765 qdev = netdev_priv(ndev);
4766 SET_NETDEV_DEV(ndev, &pdev->dev);
Jitendra Kalsariaa45adbe2013-09-27 13:17:46 -04004767 ndev->hw_features = NETIF_F_SG |
4768 NETIF_F_IP_CSUM |
4769 NETIF_F_TSO |
4770 NETIF_F_TSO_ECN |
4771 NETIF_F_HW_VLAN_CTAG_TX |
4772 NETIF_F_HW_VLAN_CTAG_RX |
4773 NETIF_F_HW_VLAN_CTAG_FILTER |
4774 NETIF_F_RXCSUM;
4775 ndev->features = ndev->hw_features;
brenohl@br.ibm.com1a0150a92012-07-27 08:54:52 +00004776 ndev->vlan_features = ndev->hw_features;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004777
4778 if (test_bit(QL_DMA64, &qdev->flags))
4779 ndev->features |= NETIF_F_HIGHDMA;
4780
4781 /*
4782 * Set up net_device structure.
4783 */
4784 ndev->tx_queue_len = qdev->tx_ring_size;
4785 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004786
4787 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004788 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004789 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004790
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004791 err = register_netdev(ndev);
4792 if (err) {
4793 dev_err(&pdev->dev, "net device registration failed.\n");
4794 ql_release_all(pdev);
4795 pci_disable_device(pdev);
Wei Yongjun4d2593c2013-05-22 23:09:50 +00004796 free_netdev(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004797 return err;
4798 }
Ron Mercer15c052f2010-02-04 13:32:46 -08004799	/* Start the timer used to trigger EEH detection
4800	 * if the bus goes dead.
4801	 */
4802 init_timer_deferrable(&qdev->timer);
4803 qdev->timer.data = (unsigned long)qdev;
4804 qdev->timer.function = ql_timer;
4805 qdev->timer.expires = jiffies + (5*HZ);
4806 add_timer(&qdev->timer);
Ron Mercer6a473302009-07-02 06:06:12 +00004807 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004808 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004809 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004810 cards_found++;
4811 return 0;
4812}
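/* Editorial note: the EEH watchdog above is a deferrable timer, so an
 * idle CPU is not woken solely to poll the STS register; the 5*HZ
 * expiry is handled on the next natural wakeup instead.
 */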
4813
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004814netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4815{
4816 return qlge_send(skb, ndev);
4817}
4818
4819int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4820{
4821 return ql_clean_inbound_rx_ring(rx_ring, budget);
4822}
4823
Bill Pemberton5d8e8722012-12-03 09:23:27 -05004824static void qlge_remove(struct pci_dev *pdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004825{
4826 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004827 struct ql_adapter *qdev = netdev_priv(ndev);
4828 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004829 ql_cancel_all_work_sync(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004830 unregister_netdev(ndev);
4831 ql_release_all(pdev);
4832 pci_disable_device(pdev);
4833 free_netdev(ndev);
4834}
4835
Ron Mercer6d190c62009-10-28 08:39:20 +00004836/* Clean up resources without touching hardware. */
4837static void ql_eeh_close(struct net_device *ndev)
4838{
4839 int i;
4840 struct ql_adapter *qdev = netdev_priv(ndev);
4841
4842 if (netif_carrier_ok(ndev)) {
4843 netif_carrier_off(ndev);
4844 netif_stop_queue(ndev);
4845 }
4846
Breno Leitao7ae80ab2010-07-01 03:00:18 +00004847	/* Disable the timer. */
4848 del_timer_sync(&qdev->timer);
Breno Leitaoc5daddd2010-08-24 12:50:40 +00004849 ql_cancel_all_work_sync(qdev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004850
4851 for (i = 0; i < qdev->rss_ring_count; i++)
4852 netif_napi_del(&qdev->rx_ring[i].napi);
4853
4854 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4855 ql_tx_ring_clean(qdev);
4856 ql_free_rx_buffers(qdev);
4857 ql_release_adapter_resources(qdev);
4858}
4859
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004860/*
4861 * This callback is called by the PCI subsystem whenever
4862 * a PCI bus error is detected.
4863 */
4864static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4865 enum pci_channel_state state)
4866{
4867 struct net_device *ndev = pci_get_drvdata(pdev);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004868 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004869
Ron Mercer6d190c62009-10-28 08:39:20 +00004870 switch (state) {
4871 case pci_channel_io_normal:
4872 return PCI_ERS_RESULT_CAN_RECOVER;
4873 case pci_channel_io_frozen:
4874 netif_device_detach(ndev);
4875 if (netif_running(ndev))
4876 ql_eeh_close(ndev);
4877 pci_disable_device(pdev);
4878 return PCI_ERS_RESULT_NEED_RESET;
4879 case pci_channel_io_perm_failure:
4880 dev_err(&pdev->dev,
4881 "%s: pci_channel_io_perm_failure.\n", __func__);
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004882 ql_eeh_close(ndev);
4883 set_bit(QL_EEH_FATAL, &qdev->flags);
Dean Nelsonfbc663c2009-07-31 09:13:48 +00004884 return PCI_ERS_RESULT_DISCONNECT;
Ron Mercer6d190c62009-10-28 08:39:20 +00004885 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004886
4887 /* Request a slot reset. */
4888 return PCI_ERS_RESULT_NEED_RESET;
4889}
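/* Editorial note: after PCI_ERS_RESULT_NEED_RESET the PCI error
 * recovery core resets the slot and calls qlge_io_slot_reset(), then
 * qlge_io_resume() on success. PCI_ERS_RESULT_DISCONNECT (the
 * perm_failure case) ends recovery and leads to device removal.
 */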
4890
4891/*
4892 * This callback is called after the PCI bus has been reset.
4893 * Basically, this tries to restart the card from scratch.
4894 * This is a shortened version of the device probe/discovery code;
4895 * it resembles the first half of the probe routine.
4896 */
4897static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4898{
4899 struct net_device *ndev = pci_get_drvdata(pdev);
4900 struct ql_adapter *qdev = netdev_priv(ndev);
4901
Ron Mercer6d190c62009-10-28 08:39:20 +00004902 pdev->error_state = pci_channel_io_normal;
4903
4904 pci_restore_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004905 if (pci_enable_device(pdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004906 netif_err(qdev, ifup, qdev->ndev,
4907 "Cannot re-enable PCI device after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004908 return PCI_ERS_RESULT_DISCONNECT;
4909 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004910 pci_set_master(pdev);
Ron Mercera112fd42010-02-03 07:24:11 +00004911
4912 if (ql_adapter_reset(qdev)) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004913 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00004914 set_bit(QL_EEH_FATAL, &qdev->flags);
Ron Mercera112fd42010-02-03 07:24:11 +00004915 return PCI_ERS_RESULT_DISCONNECT;
4916 }
4917
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004918 return PCI_ERS_RESULT_RECOVERED;
4919}
4920
4921static void qlge_io_resume(struct pci_dev *pdev)
4922{
4923 struct net_device *ndev = pci_get_drvdata(pdev);
4924 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6d190c62009-10-28 08:39:20 +00004925 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004926
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004927 if (netif_running(ndev)) {
Ron Mercer6d190c62009-10-28 08:39:20 +00004928 err = qlge_open(ndev);
4929 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004930 netif_err(qdev, ifup, qdev->ndev,
4931 "Device initialization failed after reset.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004932 return;
4933 }
Ron Mercer6d190c62009-10-28 08:39:20 +00004934 } else {
Joe Perchesae9540f72010-02-09 11:49:52 +00004935 netif_err(qdev, ifup, qdev->ndev,
4936 "Device was not running prior to EEH.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004937 }
Breno Leitao72046d82010-07-01 03:00:17 +00004938 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004939 netif_device_attach(ndev);
4940}
4941
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004942static const struct pci_error_handlers qlge_err_handler = {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004943 .error_detected = qlge_io_error_detected,
4944 .slot_reset = qlge_io_slot_reset,
4945 .resume = qlge_io_resume,
4946};
4947
4948static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4949{
4950 struct net_device *ndev = pci_get_drvdata(pdev);
4951 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer6b318cb2009-03-09 10:59:26 +00004952 int err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004953
4954 netif_device_detach(ndev);
Ron Mercer15c052f2010-02-04 13:32:46 -08004955 del_timer_sync(&qdev->timer);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004956
4957 if (netif_running(ndev)) {
4958 err = ql_adapter_down(qdev);
4959		if (err)
4960			return err;
4961 }
4962
Ron Mercerbc083ce2009-10-21 11:07:40 +00004963 ql_wol(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004964 err = pci_save_state(pdev);
4965 if (err)
4966 return err;
4967
4968 pci_disable_device(pdev);
4969
4970 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4971
4972 return 0;
4973}
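/* Editorial note: qlge_shutdown() below funnels into this suspend path
 * with PMSG_SUSPEND, so the ql_wol() programming also happens on
 * shutdown; that is why qlge_suspend() sits outside the CONFIG_PM
 * guard while qlge_resume() is inside it.
 */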
4974
David S. Miller04da2cf2008-09-19 16:14:24 -07004975#ifdef CONFIG_PM
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004976static int qlge_resume(struct pci_dev *pdev)
4977{
4978 struct net_device *ndev = pci_get_drvdata(pdev);
4979 struct ql_adapter *qdev = netdev_priv(ndev);
4980 int err;
4981
4982 pci_set_power_state(pdev, PCI_D0);
4983 pci_restore_state(pdev);
4984 err = pci_enable_device(pdev);
4985 if (err) {
Joe Perchesae9540f72010-02-09 11:49:52 +00004986 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004987 return err;
4988 }
4989 pci_set_master(pdev);
4990
4991 pci_enable_wake(pdev, PCI_D3hot, 0);
4992 pci_enable_wake(pdev, PCI_D3cold, 0);
4993
4994 if (netif_running(ndev)) {
4995 err = ql_adapter_up(qdev);
4996 if (err)
4997 return err;
4998 }
4999
Breno Leitao72046d82010-07-01 03:00:17 +00005000 mod_timer(&qdev->timer, jiffies + (5*HZ));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04005001 netif_device_attach(ndev);
5002
5003 return 0;
5004}
David S. Miller04da2cf2008-09-19 16:14:24 -07005005#endif /* CONFIG_PM */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04005006
5007static void qlge_shutdown(struct pci_dev *pdev)
5008{
5009 qlge_suspend(pdev, PMSG_SUSPEND);
5010}
5011
5012static struct pci_driver qlge_driver = {
5013 .name = DRV_NAME,
5014 .id_table = qlge_pci_tbl,
5015 .probe = qlge_probe,
Bill Pemberton5d8e8722012-12-03 09:23:27 -05005016 .remove = qlge_remove,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04005017#ifdef CONFIG_PM
5018 .suspend = qlge_suspend,
5019 .resume = qlge_resume,
5020#endif
5021 .shutdown = qlge_shutdown,
5022 .err_handler = &qlge_err_handler
5023};
5024
Peter Hüwe70a611d2013-05-21 12:58:08 +00005025module_pci_driver(qlge_driver);