blob: 8d4581bdba3c3f699536b51d701f239e855a4a37 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
Matt Carlson9e056c02012-02-13 15:20:17 +00007 * Copyright (C) 2005-2012 Broadcom Corporation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19#include <linux/module.h>
20#include <linux/moduleparam.h>
Matt Carlson6867c842010-07-11 09:31:44 +000021#include <linux/stringify.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020027#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/init.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000029#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
Matt Carlson3110f5f52010-12-06 08:28:50 +000036#include <linux/mdio.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070038#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070039#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070044#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020045#include <linux/dma-mapping.h>
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -080046#include <linux/firmware.h>
Michael Chanaed93e02012-07-16 16:24:02 +000047#include <linux/hwmon.h>
48#include <linux/hwmon-sysfs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030051#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
Javier Martinez Canillas27fd9de2011-03-26 16:42:31 +000053#include <linux/io.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070054#include <asm/byteorder.h>
Javier Martinez Canillas27fd9de2011-03-26 16:42:31 +000055#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
David S. Miller49b6e95f2007-03-29 01:38:42 -070057#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070059#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#endif
61
Matt Carlson63532392008-11-03 16:49:57 -080062#define BAR_0 0
63#define BAR_2 2
64
Linus Torvalds1da177e2005-04-16 15:20:36 -070065#include "tg3.h"
66
Joe Perches63c3a662011-04-26 08:12:10 +000067/* Functions & macros to verify TG3_FLAGS types */
68
69static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70{
71 return test_bit(flag, bits);
72}
73
74static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
75{
76 set_bit(flag, bits);
77}
78
79static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80{
81 clear_bit(flag, bits);
82}
83
84#define tg3_flag(tp, flag) \
85 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
86#define tg3_flag_set(tp, flag) \
87 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
88#define tg3_flag_clear(tp, flag) \
89 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90
Linus Torvalds1da177e2005-04-16 15:20:36 -070091#define DRV_MODULE_NAME "tg3"
Matt Carlson6867c842010-07-11 09:31:44 +000092#define TG3_MAJ_NUM 3
Nithin Nayak Sujirbd473da2012-11-05 14:26:30 +000093#define TG3_MIN_NUM 126
Matt Carlson6867c842010-07-11 09:31:44 +000094#define DRV_MODULE_VERSION \
95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
Nithin Nayak Sujirbd473da2012-11-05 14:26:30 +000096#define DRV_MODULE_RELDATE "November 05, 2012"
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
Matt Carlsonfd6d3f02011-08-31 11:44:52 +000098#define RESET_KIND_SHUTDOWN 0
99#define RESET_KIND_INIT 1
100#define RESET_KIND_SUSPEND 2
101
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102#define TG3_DEF_RX_MODE 0
103#define TG3_DEF_TX_MODE 0
104#define TG3_DEF_MSG_ENABLE \
105 (NETIF_MSG_DRV | \
106 NETIF_MSG_PROBE | \
107 NETIF_MSG_LINK | \
108 NETIF_MSG_TIMER | \
109 NETIF_MSG_IFDOWN | \
110 NETIF_MSG_IFUP | \
111 NETIF_MSG_RX_ERR | \
112 NETIF_MSG_TX_ERR)
113
Matt Carlson520b2752011-06-13 13:39:02 +0000114#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116/* length of time before we decide the hardware is borked,
117 * and dev->tx_timeout() should be called to fix the problem
118 */
Joe Perches63c3a662011-04-26 08:12:10 +0000119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120#define TG3_TX_TIMEOUT (5 * HZ)
121
122/* hardware minimum and maximum for a single frame's data payload */
123#define TG3_MIN_MTU 60
124#define TG3_MAX_MTU(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000125 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126
127/* These numbers seem to be hard coded in the NIC firmware somehow.
128 * You can't change the ring sizes, but you can change where you place
129 * them in the NIC onboard memory.
130 */
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000131#define TG3_RX_STD_RING_SIZE(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
Matt Carlsonde9f5232011-04-05 14:22:43 +0000133 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134#define TG3_DEF_RX_RING_PENDING 200
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000135#define TG3_RX_JMB_RING_SIZE(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
Matt Carlsonde9f5232011-04-05 14:22:43 +0000137 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138#define TG3_DEF_RX_JUMBO_RING_PENDING 100
139
140/* Do not place this n-ring entries value into the tp struct itself,
141 * we really want to expose these constants to GCC so that modulo et
142 * al. operations are done with shifts and masks instead of with
143 * hw multiply/modulo instructions. Another solution would be to
144 * replace things like '% foo' with '& (foo - 1)'.
145 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146
147#define TG3_TX_RING_SIZE 512
148#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
149
Matt Carlson2c49a442010-09-30 10:34:35 +0000150#define TG3_RX_STD_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152#define TG3_RX_JMB_RING_BYTES(tp) \
153 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154#define TG3_RX_RCB_RING_BYTES(tp) \
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000155 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
157 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
Matt Carlson287be122009-08-28 13:58:46 +0000160#define TG3_DMA_BYTE_ENAB 64
161
162#define TG3_RX_STD_DMA_SZ 1536
163#define TG3_RX_JMB_DMA_SZ 9046
164
165#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
166
167#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169
Matt Carlson2c49a442010-09-30 10:34:35 +0000170#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
Matt Carlson2b2cdb62009-11-13 13:03:48 +0000172
Matt Carlson2c49a442010-09-30 10:34:35 +0000173#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
Matt Carlson2b2cdb62009-11-13 13:03:48 +0000175
Matt Carlsond2757fc2010-04-12 06:58:27 +0000176/* Due to a hardware bug, the 5701 can only DMA to memory addresses
177 * that are at least dword aligned when used in PCIX mode. The driver
178 * works around this bug by double copying the packet. This workaround
179 * is built into the normal double copy length check for efficiency.
180 *
181 * However, the double copy is only necessary on those architectures
182 * where unaligned memory accesses are inefficient. For those architectures
183 * where unaligned memory accesses incur little penalty, we can reintegrate
184 * the 5701 in the normal rx path. Doing so saves a device structure
185 * dereference by hardcoding the double copy threshold in place.
186 */
187#define TG3_RX_COPY_THRESHOLD 256
188#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
190#else
191 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192#endif
193
Matt Carlson81389f52011-08-31 11:44:49 +0000194#if (NET_IP_ALIGN != 0)
195#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
196#else
Eric Dumazet9205fd92011-11-18 06:47:01 +0000197#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
Matt Carlson81389f52011-08-31 11:44:49 +0000198#endif
199
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200/* minimum number of free TX descriptors required to wake up TX process */
Matt Carlsonf3f3f272009-08-28 14:03:21 +0000201#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
Matt Carlson55086ad2011-12-14 11:09:59 +0000202#define TG3_TX_BD_DMA_MAX_2K 2048
Matt Carlsona4cb4282011-12-14 11:09:58 +0000203#define TG3_TX_BD_DMA_MAX_4K 4096
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
Matt Carlsonad829262008-11-21 17:16:16 -0800205#define TG3_RAW_IP_ALIGN 2
206
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000207#define TG3_FW_UPDATE_TIMEOUT_SEC 5
Matt Carlson21f76382012-02-22 12:35:21 +0000208#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000209
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800210#define FIRMWARE_TG3 "tigon/tg3.bin"
211#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
212#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
213
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214static char version[] __devinitdata =
Joe Perches05dbe002010-02-17 19:44:19 +0000215 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216
217MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
218MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
219MODULE_LICENSE("GPL");
220MODULE_VERSION(DRV_MODULE_VERSION);
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800221MODULE_FIRMWARE(FIRMWARE_TG3);
222MODULE_FIRMWARE(FIRMWARE_TG3TSO);
223MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
224
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
226module_param(tg3_debug, int, 0);
227MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000229#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
230#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
231
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000232static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
252 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
253 TG3_DRV_DATA_FLAG_5705_10_100},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
255 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256 TG3_DRV_DATA_FLAG_5705_10_100},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
Michael Chan126a3362006-09-27 16:03:07 -0700262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
Michael Chan126a3362006-09-27 16:03:07 -0700277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000280 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
281 PCI_VENDOR_ID_LENOVO,
282 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
283 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
286 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
Michael Chanb5d37722006-09-27 16:06:21 -0700294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
Matt Carlsond30cdd22007-10-07 23:28:35 -0700296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
Matt Carlson6c7af272007-10-21 16:12:02 -0700298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
Matt Carlson9936bcf2007-10-10 18:03:07 -0700299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
Matt Carlsonc88e6682008-11-03 16:49:18 -0800301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
Matt Carlson2befdce2009-08-28 12:28:45 +0000303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000305 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
306 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
307 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
308 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
310 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson321d32a2008-11-21 17:22:19 -0800311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
314 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson5e7ccf22009-08-25 10:08:42 +0000315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
Matt Carlson5001e2f2009-11-13 13:03:51 +0000316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
Michael Chan79d49692012-11-05 14:26:29 +0000317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
Matt Carlson5001e2f2009-11-13 13:03:51 +0000318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
Matt Carlsonb0f75222010-01-20 16:58:11 +0000319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
324 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
326 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson302b5002010-06-05 17:24:38 +0000327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
Matt Carlsonba1f3c72011-04-05 14:22:50 +0000328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
Greg KH02eca3f2012-07-12 15:39:44 +0000329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700330 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
331 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
332 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
333 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
334 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
335 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
336 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
Meelis Roos1dcb14d2011-05-25 05:43:47 +0000337 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700338 {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339};
340
341MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
342
Andreas Mohr50da8592006-08-14 23:54:30 -0700343static const struct {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344 const char string[ETH_GSTRING_LEN];
Matt Carlson48fa55a2011-04-13 11:05:06 +0000345} ethtool_stats_keys[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 { "rx_octets" },
347 { "rx_fragments" },
348 { "rx_ucast_packets" },
349 { "rx_mcast_packets" },
350 { "rx_bcast_packets" },
351 { "rx_fcs_errors" },
352 { "rx_align_errors" },
353 { "rx_xon_pause_rcvd" },
354 { "rx_xoff_pause_rcvd" },
355 { "rx_mac_ctrl_rcvd" },
356 { "rx_xoff_entered" },
357 { "rx_frame_too_long_errors" },
358 { "rx_jabbers" },
359 { "rx_undersize_packets" },
360 { "rx_in_length_errors" },
361 { "rx_out_length_errors" },
362 { "rx_64_or_less_octet_packets" },
363 { "rx_65_to_127_octet_packets" },
364 { "rx_128_to_255_octet_packets" },
365 { "rx_256_to_511_octet_packets" },
366 { "rx_512_to_1023_octet_packets" },
367 { "rx_1024_to_1522_octet_packets" },
368 { "rx_1523_to_2047_octet_packets" },
369 { "rx_2048_to_4095_octet_packets" },
370 { "rx_4096_to_8191_octet_packets" },
371 { "rx_8192_to_9022_octet_packets" },
372
373 { "tx_octets" },
374 { "tx_collisions" },
375
376 { "tx_xon_sent" },
377 { "tx_xoff_sent" },
378 { "tx_flow_control" },
379 { "tx_mac_errors" },
380 { "tx_single_collisions" },
381 { "tx_mult_collisions" },
382 { "tx_deferred" },
383 { "tx_excessive_collisions" },
384 { "tx_late_collisions" },
385 { "tx_collide_2times" },
386 { "tx_collide_3times" },
387 { "tx_collide_4times" },
388 { "tx_collide_5times" },
389 { "tx_collide_6times" },
390 { "tx_collide_7times" },
391 { "tx_collide_8times" },
392 { "tx_collide_9times" },
393 { "tx_collide_10times" },
394 { "tx_collide_11times" },
395 { "tx_collide_12times" },
396 { "tx_collide_13times" },
397 { "tx_collide_14times" },
398 { "tx_collide_15times" },
399 { "tx_ucast_packets" },
400 { "tx_mcast_packets" },
401 { "tx_bcast_packets" },
402 { "tx_carrier_sense_errors" },
403 { "tx_discards" },
404 { "tx_errors" },
405
406 { "dma_writeq_full" },
407 { "dma_write_prioq_full" },
408 { "rxbds_empty" },
409 { "rx_discards" },
410 { "rx_errors" },
411 { "rx_threshold_hit" },
412
413 { "dma_readq_full" },
414 { "dma_read_prioq_full" },
415 { "tx_comp_queue_full" },
416
417 { "ring_set_send_prod_index" },
418 { "ring_status_update" },
419 { "nic_irqs" },
420 { "nic_avoided_irqs" },
Matt Carlson4452d092011-05-19 12:12:51 +0000421 { "nic_tx_threshold_hit" },
422
423 { "mbuf_lwm_thresh_hit" },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424};
425
Matt Carlson48fa55a2011-04-13 11:05:06 +0000426#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
427
428
Andreas Mohr50da8592006-08-14 23:54:30 -0700429static const struct {
Michael Chan4cafd3f2005-05-29 14:56:34 -0700430 const char string[ETH_GSTRING_LEN];
Matt Carlson48fa55a2011-04-13 11:05:06 +0000431} ethtool_test_keys[] = {
Matt Carlson28a45952011-08-19 13:58:22 +0000432 { "nvram test (online) " },
433 { "link test (online) " },
434 { "register test (offline)" },
435 { "memory test (offline)" },
436 { "mac loopback test (offline)" },
437 { "phy loopback test (offline)" },
Matt Carlson941ec902011-08-19 13:58:23 +0000438 { "ext loopback test (offline)" },
Matt Carlson28a45952011-08-19 13:58:22 +0000439 { "interrupt test (offline)" },
Michael Chan4cafd3f2005-05-29 14:56:34 -0700440};
441
Matt Carlson48fa55a2011-04-13 11:05:06 +0000442#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
443
444
Michael Chanb401e9e2005-12-19 16:27:04 -0800445static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
446{
447 writel(val, tp->regs + off);
448}
449
450static u32 tg3_read32(struct tg3 *tp, u32 off)
451{
Matt Carlsonde6f31e2010-04-12 06:58:30 +0000452 return readl(tp->regs + off);
Michael Chanb401e9e2005-12-19 16:27:04 -0800453}
454
Matt Carlson0d3031d2007-10-10 18:02:43 -0700455static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
456{
457 writel(val, tp->aperegs + off);
458}
459
460static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
461{
Matt Carlsonde6f31e2010-04-12 06:58:30 +0000462 return readl(tp->aperegs + off);
Matt Carlson0d3031d2007-10-10 18:02:43 -0700463}
464
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
466{
Michael Chan68929142005-08-09 20:17:14 -0700467 unsigned long flags;
468
469 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700470 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
471 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
Michael Chan68929142005-08-09 20:17:14 -0700472 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700473}
474
475static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
476{
477 writel(val, tp->regs + off);
478 readl(tp->regs + off);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479}
480
Michael Chan68929142005-08-09 20:17:14 -0700481static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
482{
483 unsigned long flags;
484 u32 val;
485
486 spin_lock_irqsave(&tp->indirect_lock, flags);
487 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
488 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
489 spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 return val;
491}
492
493static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
494{
495 unsigned long flags;
496
497 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
498 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
499 TG3_64BIT_REG_LOW, val);
500 return;
501 }
Matt Carlson66711e62009-11-13 13:03:49 +0000502 if (off == TG3_RX_STD_PROD_IDX_REG) {
Michael Chan68929142005-08-09 20:17:14 -0700503 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
504 TG3_64BIT_REG_LOW, val);
505 return;
506 }
507
508 spin_lock_irqsave(&tp->indirect_lock, flags);
509 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
510 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
511 spin_unlock_irqrestore(&tp->indirect_lock, flags);
512
513 /* In indirect mode when disabling interrupts, we also need
514 * to clear the interrupt bit in the GRC local ctrl register.
515 */
516 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
517 (val == 0x1)) {
518 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
519 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
520 }
521}
522
523static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
524{
525 unsigned long flags;
526 u32 val;
527
528 spin_lock_irqsave(&tp->indirect_lock, flags);
529 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
530 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
531 spin_unlock_irqrestore(&tp->indirect_lock, flags);
532 return val;
533}
534
Michael Chanb401e9e2005-12-19 16:27:04 -0800535/* usec_wait specifies the wait time in usec when writing to certain registers
536 * where it is unsafe to read back the register without some delay.
537 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
538 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
539 */
540static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541{
Joe Perches63c3a662011-04-26 08:12:10 +0000542 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
Michael Chanb401e9e2005-12-19 16:27:04 -0800543 /* Non-posted methods */
544 tp->write32(tp, off, val);
545 else {
546 /* Posted method */
547 tg3_write32(tp, off, val);
548 if (usec_wait)
549 udelay(usec_wait);
550 tp->read32(tp, off);
551 }
552 /* Wait again after the read for the posted method to guarantee that
553 * the wait time is met.
554 */
555 if (usec_wait)
556 udelay(usec_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557}
558
Michael Chan09ee9292005-08-09 20:17:00 -0700559static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
560{
561 tp->write32_mbox(tp, off, val);
Joe Perches63c3a662011-04-26 08:12:10 +0000562 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
Michael Chan68929142005-08-09 20:17:14 -0700563 tp->read32_mbox(tp, off);
Michael Chan09ee9292005-08-09 20:17:00 -0700564}
565
Michael Chan20094932005-08-09 20:16:32 -0700566static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567{
568 void __iomem *mbox = tp->regs + off;
569 writel(val, mbox);
Joe Perches63c3a662011-04-26 08:12:10 +0000570 if (tg3_flag(tp, TXD_MBOX_HWBUG))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571 writel(val, mbox);
Joe Perches63c3a662011-04-26 08:12:10 +0000572 if (tg3_flag(tp, MBOX_WRITE_REORDER))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 readl(mbox);
574}
575
Michael Chanb5d37722006-09-27 16:06:21 -0700576static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
577{
Matt Carlsonde6f31e2010-04-12 06:58:30 +0000578 return readl(tp->regs + off + GRCMBOX_BASE);
Michael Chanb5d37722006-09-27 16:06:21 -0700579}
580
581static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
582{
583 writel(val, tp->regs + off + GRCMBOX_BASE);
584}
585
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000586#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700587#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000588#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
589#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
590#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
Michael Chan20094932005-08-09 20:16:32 -0700591
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000592#define tw32(reg, val) tp->write32(tp, reg, val)
593#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
594#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
595#define tr32(reg) tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596
597static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
598{
Michael Chan68929142005-08-09 20:17:14 -0700599 unsigned long flags;
600
Matt Carlson6ff6f812011-05-19 12:12:54 +0000601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
Michael Chanb5d37722006-09-27 16:06:21 -0700602 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
603 return;
604
Michael Chan68929142005-08-09 20:17:14 -0700605 spin_lock_irqsave(&tp->indirect_lock, flags);
Joe Perches63c3a662011-04-26 08:12:10 +0000606 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
Michael Chanbbadf502006-04-06 21:46:34 -0700607 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
608 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609
Michael Chanbbadf502006-04-06 21:46:34 -0700610 /* Always leave this as zero. */
611 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
612 } else {
613 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
614 tw32_f(TG3PCI_MEM_WIN_DATA, val);
615
616 /* Always leave this as zero. */
617 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
618 }
Michael Chan68929142005-08-09 20:17:14 -0700619 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620}
621
/* Read a 32-bit value from NIC SRAM at offset @off into *@val, using
 * the same PCI memory window mechanism as tg3_write_mem() and the same
 * indirect_lock serialization.
 *
 * On 5906 chips, reads from the statistics block region return 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Access the window via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Access the window via memory-mapped registers. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
648
Matt Carlson0d3031d2007-10-10 18:02:43 -0700649static void tg3_ape_lock_init(struct tg3 *tp)
650{
651 int i;
Matt Carlson6f5c8f832011-07-13 09:27:31 +0000652 u32 regbase, bit;
Matt Carlsonf92d9dc12010-06-05 17:24:30 +0000653
654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
655 regbase = TG3_APE_LOCK_GRANT;
656 else
657 regbase = TG3_APE_PER_LOCK_GRANT;
Matt Carlson0d3031d2007-10-10 18:02:43 -0700658
659 /* Make sure the driver hasn't any stale locks. */
Matt Carlson78f94dc2011-11-04 09:14:58 +0000660 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
661 switch (i) {
662 case TG3_APE_LOCK_PHY0:
663 case TG3_APE_LOCK_PHY1:
664 case TG3_APE_LOCK_PHY2:
665 case TG3_APE_LOCK_PHY3:
666 bit = APE_LOCK_GRANT_DRIVER;
667 break;
668 default:
669 if (!tp->pci_fn)
670 bit = APE_LOCK_GRANT_DRIVER;
671 else
672 bit = 1 << tp->pci_fn;
673 }
674 tg3_ape_write32(tp, regbase + 4 * i, bit);
Matt Carlson6f5c8f832011-07-13 09:27:31 +0000675 }
676
Matt Carlson0d3031d2007-10-10 18:02:43 -0700677}
678
/* Acquire an APE hardware lock by posting our request bit and polling
 * the grant register for up to 1 ms.
 *
 * Returns 0 on success (or when APE is absent, or for the GPIO lock on
 * 5761 where it does not apply), -EINVAL for an unknown lock number,
 * and -EBUSY if the grant never appeared (the request is revoked first).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through - GPIO uses the per-function bit too */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant register blocks. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
737
/* Release an APE hardware lock previously taken with tg3_ape_lock() by
 * writing our bit back to the grant register.  No-op when APE is absent,
 * for the GPIO lock on 5761, or for unknown lock numbers.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through - GPIO uses the per-function bit too */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
773
/* Take TG3_APE_LOCK_MEM and wait until no driver event is pending in
 * TG3_APE_EVENT_STATUS, polling in 10 usec steps for at most @timeout_us.
 *
 * On success returns 0 with TG3_APE_LOCK_MEM still *held*; the caller
 * must release it (see tg3_ape_send_event() / tg3_ape_scratchpad_read()).
 * Returns -EBUSY if the lock could not be taken or the pending bit
 * never cleared within the timeout.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Event still pending: drop the lock, wait, retry. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
794
Matt Carlsoncf8d55a2012-07-16 16:24:01 +0000795static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
796{
797 u32 i, apedata;
798
799 for (i = 0; i < timeout_us / 10; i++) {
800 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
801
802 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
803 break;
804
805 udelay(10);
806 }
807
808 return i == timeout_us / 10;
809}
810
/* Read @len bytes from APE scratchpad memory starting at @base_off into
 * @data, in chunks capped by the shared message buffer size.
 *
 * Protocol per chunk: verify APE firmware is ready, take the MEM lock
 * via tg3_ape_event_lock(), post a SCRTCHPD_READ driver event with the
 * offset and length in the message buffer, kick the APE, then wait up
 * to 30 ms for the firmware to fill the buffer and copy it out word by
 * word.
 *
 * Returns 0 on success (also when APE_HAS_NCSI is not set, in which
 * case nothing is read), -ENODEV if the APE signature is missing,
 * -EAGAIN/-EBUSY on firmware not-ready or timeout.
 *
 * NOTE(review): the copy loop assumes each chunk length is a multiple
 * of 4 bytes (length -= 4 would wrap otherwise) - presumably guaranteed
 * by the firmware's buffer length; confirm against APE docs.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer; the payload area starts
	 * after the two header words (offset, length).
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* MEM lock is now held (taken by tg3_ape_event_lock). */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the serviced chunk out of the message buffer. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
874
/* Post a driver event to the APE firmware and ring its doorbell.
 *
 * Verifies the APE shared-memory signature and firmware-ready status,
 * waits (via tg3_ape_event_lock, which returns holding the MEM lock)
 * for any previous event to be consumed, then writes the new event with
 * the pending bit set and kicks the APE.
 *
 * Returns 0 on success, -EAGAIN if the APE is absent or not ready,
 * or the error from tg3_ape_event_lock().
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	/* MEM lock is held here; publish the event, drop the lock,
	 * then ring the doorbell.
	 */
	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
901
/* Inform the APE firmware of a driver state transition (@kind is one of
 * RESET_KIND_INIT / RESET_KIND_SHUTDOWN / RESET_KIND_SUSPEND).
 *
 * For INIT, populates the host segment of APE shared memory (signature,
 * driver id, behavior flags, init counter) and announces START.  For
 * SHUTDOWN, wipes the host segment signature so the APE assumes the OS
 * is absent, and records WOL vs. plain unload state.  Finally sends the
 * corresponding state-change event via tg3_ape_send_event().
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		/* Bump the init counter so the APE can detect re-inits. */
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
958
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959static void tg3_disable_ints(struct tg3 *tp)
960{
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000961 int i;
962
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963 tw32(TG3PCI_MISC_HOST_CTRL,
964 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000965 for (i = 0; i < tp->irq_max; i++)
966 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967}
968
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969static void tg3_enable_ints(struct tg3 *tp)
970{
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000971 int i;
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000972
Michael Chanbbe832c2005-06-24 20:20:04 -0700973 tp->irq_sync = 0;
974 wmb();
975
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976 tw32(TG3PCI_MISC_HOST_CTRL,
977 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
Matt Carlsonf19af9c2009-09-01 12:47:49 +0000978
Matt Carlsonf89f38b2010-02-12 14:47:07 +0000979 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000980 for (i = 0; i < tp->irq_cnt; i++) {
981 struct tg3_napi *tnapi = &tp->napi[i];
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000982
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000983 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
Joe Perches63c3a662011-04-26 08:12:10 +0000984 if (tg3_flag(tp, 1SHOT_MSI))
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000985 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
986
Matt Carlsonf89f38b2010-02-12 14:47:07 +0000987 tp->coal_now |= tnapi->coal_now;
Matt Carlson89aeb3b2009-09-01 13:08:58 +0000988 }
Matt Carlsonf19af9c2009-09-01 12:47:49 +0000989
990 /* Force an initial interrupt */
Joe Perches63c3a662011-04-26 08:12:10 +0000991 if (!tg3_flag(tp, TAGGED_STATUS) &&
Matt Carlsonf19af9c2009-09-01 12:47:49 +0000992 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
993 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
994 else
Matt Carlsonf89f38b2010-02-12 14:47:07 +0000995 tw32(HOSTCC_MODE, tp->coal_now);
996
997 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998}
999
Matt Carlson17375d22009-08-28 14:02:18 +00001000static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
Michael Chan04237dd2005-04-25 15:17:17 -07001001{
Matt Carlson17375d22009-08-28 14:02:18 +00001002 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00001003 struct tg3_hw_status *sblk = tnapi->hw_status;
Michael Chan04237dd2005-04-25 15:17:17 -07001004 unsigned int work_exists = 0;
1005
1006 /* check for phy events */
Joe Perches63c3a662011-04-26 08:12:10 +00001007 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
Michael Chan04237dd2005-04-25 15:17:17 -07001008 if (sblk->status & SD_STATUS_LINK_CHG)
1009 work_exists = 1;
1010 }
Matt Carlsonf891ea12012-04-24 13:37:01 +00001011
1012 /* check for TX work to do */
1013 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1014 work_exists = 1;
1015
1016 /* check for RX work to do */
1017 if (tnapi->rx_rcb_prod_idx &&
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00001018 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
Michael Chan04237dd2005-04-25 15:17:17 -07001019 work_exists = 1;
1020
1021 return work_exists;
1022}
1023
/* tg3_int_reenable()
 * Similar to tg3_enable_ints(), but it accurately determines whether
 * there is new work pending and can return without flushing the PIO
 * write which re-enables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack the status block with the last seen tag; mmiowb() orders
	 * this write against later MMIO from other CPUs.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1044
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045static void tg3_switch_clocks(struct tg3 *tp)
1046{
Matt Carlsonf6eb9b12009-09-01 13:19:53 +00001047 u32 clock_ctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 u32 orig_clock_ctrl;
1049
Joe Perches63c3a662011-04-26 08:12:10 +00001050 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
Michael Chan4cf78e42005-07-25 12:29:19 -07001051 return;
1052
Matt Carlsonf6eb9b12009-09-01 13:19:53 +00001053 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1054
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 orig_clock_ctrl = clock_ctrl;
1056 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1057 CLOCK_CTRL_CLKRUN_OENABLE |
1058 0x1f);
1059 tp->pci_clock_ctrl = clock_ctrl;
1060
Joe Perches63c3a662011-04-26 08:12:10 +00001061 if (tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001063 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1064 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 }
1066 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001067 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1068 clock_ctrl |
1069 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1070 40);
1071 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1072 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1073 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001075 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076}
1077
1078#define PHY_BUSY_LOOPS 5000
1079
/* Read MII register @reg of the chip's PHY into *@val.
 *
 * Auto-polling is temporarily disabled (and restored afterwards) so the
 * MI_COM interface can be driven directly; the access is bracketed by
 * the APE PHY lock.  A read frame is posted and MI_COM is polled in
 * 10 usec steps up to PHY_BUSY_LOOPS times.
 *
 * Returns 0 on success (with *val holding the register data) or -EBUSY
 * on timeout (*val left as 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI communication frame: PHY address, register,
	 * read command, start bit.
	 */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy just cleared; re-read for stable data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1132
/* Write @val to MII register @reg of the chip's PHY.
 *
 * Mirrors tg3_readphy(): auto-polling is suspended, the access is
 * bracketed by the APE PHY lock, a write frame is posted, and MI_COM is
 * polled until the busy bit clears or PHY_BUSY_LOOPS expires.
 *
 * On FET-type PHYs, writes to MII_CTRL1000 and the AUX control register
 * are silently ignored (returns 0).
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI communication frame: PHY address, register,
	 * data, write command, start bit.
	 */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1185
Matt Carlsonb0988c12011-04-20 07:57:39 +00001186static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1187{
1188 int err;
1189
1190 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1191 if (err)
1192 goto done;
1193
1194 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1195 if (err)
1196 goto done;
1197
1198 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1199 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1200 if (err)
1201 goto done;
1202
1203 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1204
1205done:
1206 return err;
1207}
1208
1209static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1210{
1211 int err;
1212
1213 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1214 if (err)
1215 goto done;
1216
1217 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1218 if (err)
1219 goto done;
1220
1221 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1222 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1223 if (err)
1224 goto done;
1225
1226 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1227
1228done:
1229 return err;
1230}
1231
1232static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1233{
1234 int err;
1235
1236 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1237 if (!err)
1238 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1239
1240 return err;
1241}
1242
1243static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1244{
1245 int err;
1246
1247 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1248 if (!err)
1249 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1250
1251 return err;
1252}
1253
Matt Carlson15ee95c2011-04-20 07:57:40 +00001254static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1255{
1256 int err;
1257
1258 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1259 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1260 MII_TG3_AUXCTL_SHDWSEL_MISC);
1261 if (!err)
1262 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1263
1264 return err;
1265}
1266
Matt Carlsonb4bd2922011-04-20 07:57:41 +00001267static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1268{
1269 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1270 set |= MII_TG3_AUXCTL_MISC_WREN;
1271
1272 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1273}
1274
/* Enable/disable the PHY's SMDSP via the AUXCTL shadow register.  Both
 * macros expand to a tg3_phy_auxctl_write() call and yield its return
 * value.  Note: neither macro may carry a trailing semicolon, so they
 * behave like ordinary expressions at the call site (the DISABLE
 * variant previously had a stray ';' which broke expression use and
 * unbraced if/else bodies).
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1283
/* Reset the PHY by setting BMCR_RESET and polling (10 usec steps, up to
 * 5000 iterations) until the PHY clears the bit.
 *
 * Returns 0 on success, -EBUSY if any MII access fails or the reset bit
 * never clears.  Note the while (limit--) idiom: limit is -1 after a
 * full (timed-out) loop, >= 0 after a break, which the final check
 * relies on.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1314
Matt Carlson158d7ab2008-05-29 01:37:54 -07001315static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1316{
Francois Romieu3d165432009-01-19 16:56:50 -08001317 struct tg3 *tp = bp->priv;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001318 u32 val;
1319
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001320 spin_lock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001321
1322 if (tg3_readphy(tp, reg, &val))
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001323 val = -EIO;
1324
1325 spin_unlock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001326
1327 return val;
1328}
1329
1330static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1331{
Francois Romieu3d165432009-01-19 16:56:50 -08001332 struct tg3 *tp = bp->priv;
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001333 u32 ret = 0;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001334
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001335 spin_lock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001336
1337 if (tg3_writephy(tp, reg, val))
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001338 ret = -EIO;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001339
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001340 spin_unlock_bh(&tp->lock);
1341
1342 return ret;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001343}
1344
/* phylib mii_bus reset hook.  Nothing to do at the bus level here;
 * PHY-level resets are performed separately (see tg3_bmcr_reset()).
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1349
/* Configure the 5785 MAC's PHY interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) for the attached PHY model.
 *
 * Selects the LED-mode value by PHY id; unknown PHYs are left alone.
 * For non-RGMII interfaces only the LED modes and clock timeouts are
 * programmed.  For RGMII, the in-band status, external RX-decode and
 * send-status bits are set according to the RGMII_* device flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		/* Unrecognized PHY - leave the MAC configuration alone. */
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: optionally enable in-band status signaling. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1430
/* Start direct MDIO access: disable MI auto-polling (flushed, with the
 * 80 usec settle delay), then reapply the 5785 PHY interface
 * configuration if the mdio bus has already been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1441
/* Work out the device's PHY address, start the MDIO interface, and —
 * when phylib is in use — allocate and register an mdio bus, then apply
 * PHY-model-specific interface/dev_flags quirks.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717-class parts: each PCI function has its own PHY,
		 * addressed starting at pci_fn + 1. */
		tp->phy_addr = tp->pci_fn + 1;

		/* 5717 A0 uses a CPMU strap instead of SG_DIG_STATUS to
		 * report serdes mode. */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		/* Serdes PHYs sit 7 addresses above the copper PHYs. */
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do unless phylib is used and the bus has not
	 * already been registered. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	/* Only probe the single address the MAC's PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY-model quirks: interface mode and Broadcom dev_flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1546
1547static void tg3_mdio_fini(struct tg3 *tp)
1548{
Joe Perches63c3a662011-04-26 08:12:10 +00001549 if (tg3_flag(tp, MDIOBUS_INITED)) {
1550 tg3_flag_clear(tp, MDIOBUS_INITED);
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -07001551 mdiobus_unregister(tp->mdio_bus);
1552 mdiobus_free(tp->mdio_bus);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001553 }
1554}
1555
Matt Carlson95e28692008-05-25 23:44:14 -07001556/* tp->lock is held. */
Matt Carlson4ba526c2008-08-15 14:10:04 -07001557static inline void tg3_generate_fw_event(struct tg3 *tp)
1558{
1559 u32 val;
1560
1561 val = tr32(GRC_RX_CPU_EVENT);
1562 val |= GRC_RX_CPU_DRIVER_EVENT;
1563 tw32_f(GRC_RX_CPU_EVENT, val);
1564
1565 tp->last_event_jiffies = jiffies;
1566}
1567
1568#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1569
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to clear
 * the driver-event bit set by tg3_generate_fw_event(). */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps; +1 guarantees at least one check. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware acks by clearing the driver-event bit. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1596
1597/* tp->lock is held. */
Matt Carlsonb28f3892012-02-13 15:20:12 +00001598static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
Matt Carlson95e28692008-05-25 23:44:14 -07001599{
Matt Carlsonb28f3892012-02-13 15:20:12 +00001600 u32 reg, val;
Matt Carlson95e28692008-05-25 23:44:14 -07001601
1602 val = 0;
1603 if (!tg3_readphy(tp, MII_BMCR, &reg))
1604 val = reg << 16;
1605 if (!tg3_readphy(tp, MII_BMSR, &reg))
1606 val |= (reg & 0xffff);
Matt Carlsonb28f3892012-02-13 15:20:12 +00001607 *data++ = val;
Matt Carlson95e28692008-05-25 23:44:14 -07001608
1609 val = 0;
1610 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1611 val = reg << 16;
1612 if (!tg3_readphy(tp, MII_LPA, &reg))
1613 val |= (reg & 0xffff);
Matt Carlsonb28f3892012-02-13 15:20:12 +00001614 *data++ = val;
Matt Carlson95e28692008-05-25 23:44:14 -07001615
1616 val = 0;
Matt Carlsonf07e9af2010-08-02 11:26:07 +00001617 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
Matt Carlson95e28692008-05-25 23:44:14 -07001618 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1619 val = reg << 16;
1620 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1621 val |= (reg & 0xffff);
1622 }
Matt Carlsonb28f3892012-02-13 15:20:12 +00001623 *data++ = val;
Matt Carlson95e28692008-05-25 23:44:14 -07001624
1625 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1626 val = reg << 16;
1627 else
1628 val = 0;
Matt Carlsonb28f3892012-02-13 15:20:12 +00001629 *data++ = val;
1630}
1631
/* tp->lock is held. */
/* Report the current PHY/link register state to the management firmware
 * through the NIC SRAM command mailbox. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	/* Only 5780-class chips running ASF firmware consume this. */
	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	/* Make sure any previous driver event has been acked first. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	/* Signal the firmware that the mailbox is ready. */
	tg3_generate_fw_event(tp);
}
1653
/* tp->lock is held. */
/* Ask the ASF firmware to pause.  Skipped when the APE manages the
 * firmware instead (ENABLE_APE). */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1669
/* tp->lock is held. */
/* Before a chip reset: write the firmware magic, publish the driver
 * state for the new-style ASF handshake, and notify the APE for
 * INIT/SUSPEND resets. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	/* Magic value tells the firmware a driver-initiated reset follows. */
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1702
/* tp->lock is held. */
/* After a chip reset: publish the *_DONE driver state for the new-style
 * ASF handshake and notify the APE on shutdown. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1726
1727/* tp->lock is held. */
1728static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1729{
1730 if (tg3_flag(tp, ENABLE_ASF)) {
1731 switch (kind) {
1732 case RESET_KIND_INIT:
1733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734 DRV_STATE_START);
1735 break;
1736
1737 case RESET_KIND_SHUTDOWN:
1738 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739 DRV_STATE_UNLOAD);
1740 break;
1741
1742 case RESET_KIND_SUSPEND:
1743 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744 DRV_STATE_SUSPEND);
1745 break;
1746
1747 default:
1748 break;
1749 }
1750 }
1751}
1752
/* Poll until the on-chip firmware reports initialization complete.
 *
 * Returns 0 on success (or when no firmware is fitted — some Sun
 * onboard parts), -ENODEV if a 5906's VCPU never signals init done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware echoes the ones-complement of the magic value. */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1796
Matt Carlson95e28692008-05-25 23:44:14 -07001797static void tg3_link_report(struct tg3 *tp)
1798{
1799 if (!netif_carrier_ok(tp->dev)) {
Joe Perches05dbe002010-02-17 19:44:19 +00001800 netif_info(tp, link, tp->dev, "Link is down\n");
Matt Carlson95e28692008-05-25 23:44:14 -07001801 tg3_ump_link_report(tp);
1802 } else if (netif_msg_link(tp)) {
Joe Perches05dbe002010-02-17 19:44:19 +00001803 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1804 (tp->link_config.active_speed == SPEED_1000 ?
1805 1000 :
1806 (tp->link_config.active_speed == SPEED_100 ?
1807 100 : 10)),
1808 (tp->link_config.active_duplex == DUPLEX_FULL ?
1809 "full" : "half"));
Matt Carlson95e28692008-05-25 23:44:14 -07001810
Joe Perches05dbe002010-02-17 19:44:19 +00001811 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1812 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1813 "on" : "off",
1814 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1815 "on" : "off");
Matt Carlson47007832011-04-20 07:57:43 +00001816
1817 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1818 netdev_info(tp->dev, "EEE is %s\n",
1819 tp->setlpicnt ? "enabled" : "disabled");
1820
Matt Carlson95e28692008-05-25 23:44:14 -07001821 tg3_ump_link_report(tp);
1822 }
1823}
1824
Matt Carlson95e28692008-05-25 23:44:14 -07001825static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1826{
1827 u16 miireg;
1828
Steve Glendinninge18ce342008-12-16 02:00:00 -08001829 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
Matt Carlson95e28692008-05-25 23:44:14 -07001830 miireg = ADVERTISE_1000XPAUSE;
Steve Glendinninge18ce342008-12-16 02:00:00 -08001831 else if (flow_ctrl & FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001832 miireg = ADVERTISE_1000XPSE_ASYM;
Steve Glendinninge18ce342008-12-16 02:00:00 -08001833 else if (flow_ctrl & FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001834 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1835 else
1836 miireg = 0;
1837
1838 return miireg;
1839}
1840
Matt Carlson95e28692008-05-25 23:44:14 -07001841static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1842{
1843 u8 cap = 0;
1844
Matt Carlsonf3791cd2011-11-21 15:01:17 +00001845 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1846 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1847 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1848 if (lcladv & ADVERTISE_1000XPAUSE)
1849 cap = FLOW_CTRL_RX;
1850 if (rmtadv & ADVERTISE_1000XPAUSE)
Steve Glendinninge18ce342008-12-16 02:00:00 -08001851 cap = FLOW_CTRL_TX;
Matt Carlson95e28692008-05-25 23:44:14 -07001852 }
1853
1854 return cap;
1855}
1856
/* Resolve the active flow-control configuration from autoneg results (or
 * the forced setting) and program the MAC RX/TX mode registers, touching
 * the hardware only when a mode actually changed. */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution; copper
		 * uses the generic MII helper. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Avoid redundant register writes when nothing changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1895
/* phylib link-change callback: reprogram MAC mode, flow control and
 * inter-packet gap to match the PHY's negotiated state, then log the
 * change if anything relevant differs from the previous state. */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather pause advertisements for
			 * flow-control resolution. */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a longer slot time (0xff vs. 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock; tg3_link_report may log and call into
	 * the firmware mailbox path. */
	if (linkmesg)
		tg3_link_report(tp);
}
1979
/* Connect the MAC to its PHY via phylib and restrict the advertised
 * feature set to what the MAC/interface combination supports.
 *
 * Returns 0 on success (or if already connected), negative errno on
 * failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		/* 10/100-only devices fall through to the basic set. */
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2027
/* (Re)start the phylib state machine; when coming out of low-power
 * mode, first restore the saved link configuration to the phydev. */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		/* Restore the configuration saved before power-down. */
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2049
2050static void tg3_phy_stop(struct tg3 *tp)
2051{
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002052 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002053 return;
2054
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00002055 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002056}
2057
2058static void tg3_phy_fini(struct tg3 *tp)
2059{
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002060 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00002061 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002062 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002063 }
2064}
2065
Matt Carlson941ec902011-08-19 13:58:23 +00002066static int tg3_phy_set_extloopbk(struct tg3 *tp)
2067{
2068 int err;
2069 u32 val;
2070
2071 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2072 return 0;
2073
2074 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2075 /* Cannot do read-modify-write on 5401 */
2076 err = tg3_phy_auxctl_write(tp,
2077 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2078 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2079 0x4c20);
2080 goto done;
2081 }
2082
2083 err = tg3_phy_auxctl_read(tp,
2084 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2085 if (err)
2086 return err;
2087
2088 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2089 err = tg3_phy_auxctl_write(tp,
2090 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2091
2092done:
2093 return err;
2094}
2095
/* Toggle auto power-down (APD) on a FET-style PHY through its shadow
 * register window (opened/closed via MII_TG3_FET_TEST). */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		/* Open the shadow register window. */
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Restore the original test register, closing the window. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2115
/* Toggle the PHY's auto power-down (APD) feature via the MISC shadow
 * registers; dispatches to the FET variant where applicable. */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* Not applicable to pre-5705 parts, nor to 5717+ MII serdes. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLLAPD stays set except when enabling APD on 5784. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2150
/* Toggle automatic MDI/MDI-X crossover detection on the PHY, using the
 * FET shadow registers or the AUXCTL MISC shadow as appropriate. */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	/* Not applicable to pre-5705 parts or serdes links. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow register window. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore test register, closing the window. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2191
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192static void tg3_phy_set_wirespeed(struct tg3 *tp)
2193{
Matt Carlson15ee95c2011-04-20 07:57:40 +00002194 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 u32 val;
2196
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002197 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 return;
2199
Matt Carlson15ee95c2011-04-20 07:57:40 +00002200 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2201 if (!ret)
Matt Carlsonb4bd2922011-04-20 07:57:41 +00002202 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2203 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204}
2205
/* Program PHY DSP coefficients from the chip's OTP word (tp->phy_otp).
 * Each field of the OTP value is extracted and written to its DSP
 * register while the SMDSP clock is enabled. */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP writes require the SMDSP clock; bail if we can't get it. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2242
/* Adjust Energy Efficient Ethernet (EEE) state after a link change:
 * set the LPI exit timer and decide (via setlpicnt) whether LPI should
 * be armed, disabling LPI mode when it should not be. */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	/* EEE only applies to autonegotiated full-duplex 100/1000 links. */
	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check whether the link partner resolved EEE. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* Clear the DSP TAP26 knob while the SMDSP clock is on. */
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2285
/* Enable EEE LPI generation.  On 5717/5719/57765-class parts running a
 * gigabit link, program the DSP TAP26 workaround bits first (under the
 * aux-control SMDSP enable), then set the LPI enable bit in the CPMU.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305static int tg3_wait_macro_done(struct tg3 *tp)
2306{
2307 int limit = 100;
2308
2309 while (limit--) {
2310 u32 tmp32;
2311
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002312 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 if ((tmp32 & 0x1000) == 0)
2314 break;
2315 }
2316 }
Roel Kluind4675b52009-02-12 16:33:27 -08002317 if (limit < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 return -EBUSY;
2319
2320 return 0;
2321}
2322
/* Write a known test pattern into each of the four DSP channel blocks
 * and read it back to verify that PHY macro access works.  On a macro
 * timeout, *resetp is set to request another PHY reset and -EBUSY is
 * returned; a plain pattern mismatch returns -EBUSY without requesting
 * a reset.  Returns 0 when all four channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block (0x2000 stride per channel). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the block and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back: each pattern pair comes out as a masked
		 * low word and a 4-bit high word.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2388
/* Clear the DSP test pattern from all four channel blocks, leaving the
 * PHY macro state clean after tg3_phy_write_and_check_testpat().
 * Returns -EBUSY if the macro never signals completion.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2408
2409static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2410{
2411 u32 reg32, phy9_orig;
2412 int retries, do_phy_reset, err;
2413
2414 retries = 10;
2415 do_phy_reset = 1;
2416 do {
2417 if (do_phy_reset) {
2418 err = tg3_bmcr_reset(tp);
2419 if (err)
2420 return err;
2421 do_phy_reset = 0;
2422 }
2423
2424 /* Disable transmitter and interrupt. */
2425 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2426 continue;
2427
2428 reg32 |= 0x3000;
2429 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2430
2431 /* Set full-duplex, 1000 mbps. */
2432 tg3_writephy(tp, MII_BMCR,
Matt Carlson221c5632011-06-13 13:39:01 +00002433 BMCR_FULLDPLX | BMCR_SPEED1000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434
2435 /* Set to master mode. */
Matt Carlson221c5632011-06-13 13:39:01 +00002436 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 continue;
2438
Matt Carlson221c5632011-06-13 13:39:01 +00002439 tg3_writephy(tp, MII_CTRL1000,
2440 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
Matt Carlson1d36ba42011-04-20 07:57:42 +00002442 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2443 if (err)
2444 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
2446 /* Block the PHY control access. */
Matt Carlson6ee7c0a2010-08-02 11:26:04 +00002447 tg3_phydsp_write(tp, 0x8005, 0x0800);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
2449 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2450 if (!err)
2451 break;
2452 } while (--retries);
2453
2454 err = tg3_phy_reset_chanpat(tp);
2455 if (err)
2456 return err;
2457
Matt Carlson6ee7c0a2010-08-02 11:26:04 +00002458 tg3_phydsp_write(tp, 0x8005, 0x0000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459
2460 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002461 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462
Matt Carlson1d36ba42011-04-20 07:57:42 +00002463 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
Matt Carlson221c5632011-06-13 13:39:01 +00002465 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
2467 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2468 reg32 &= ~0x3000;
2469 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2470 } else if (!err)
2471 err = -EBUSY;
2472
2473 return err;
2474}
2475
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Resets the PHY and then applies all the per-chip erratum fixups:
 * 5703/4/5 go through the DSP test-pattern workaround, 5784/5761
 * get CPMU clock/GPHY adjustments, and the various PHYFLG_*_BUG
 * flags select extra DSP register patches.  Returns 0 on success or
 * a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the 5906 ethernet PHY out of IDDQ powerdown first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads: status bits are latched and need a second
	 * read to reflect the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily lift the 10MB RX-only restriction around
		 * the BMCR reset; restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 12.5MHz gigabit MAC clock override. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* The remaining fixups do not apply to 5717+ serdes parts. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice intentionally (5704 A0 erratum). */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2616
/* Per-function GPIO power-state messages exchanged through the APE
 * scratchpad (5717/5719) or CPMU driver status register.  Each PCI
 * function owns a 4-bit field, so the *_ALL_* masks replicate the
 * flag at bit offsets 0, 4, 8 and 12.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2632
2633static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2634{
2635 u32 status, shift;
2636
2637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2639 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2640 else
2641 status = tr32(TG3_CPMU_DRV_STATUS);
2642
2643 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2644 status &= ~(TG3_GPIO_MSG_MASK << shift);
2645 status |= (newstat << shift);
2646
2647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2649 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2650 else
2651 tw32(TG3_CPMU_DRV_STATUS, status);
2652
2653 return status >> TG3_APE_GPIO_MSG_SHIFT;
2654}
2655
/* Switch the device's power source back to Vmain.  On 5717/5719/5720
 * the GPIO pins are shared between PCI functions, so the transition is
 * serialized through the APE GPIO lock and the function first marks
 * itself present in the shared status word.  Returns 0, or -EIO if the
 * APE lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2680
/* Part of powering down while staying on Vmain: pulse GPIO1
 * (OUTPUT1 high, low, high) with the required power-switch settling
 * delay between writes.  No-op on non-NIC boards and on 5700/5701,
 * whose GPIOs are wired differently.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2704
/* Switch the board's power source to Vaux (auxiliary power), using the
 * GPIO sequence appropriate to the chip generation.  5761 non-E parts
 * swap GPIO 0 and GPIO 2; 5714 staggers the outputs to avoid
 * overdrawing current; 5753-family parts must not drive GPIO2.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
Matt Carlson6f5c8f832011-07-13 09:27:31 +00002781
/* 5717-family auxiliary power arbitration: publish whether this PCI
 * function needs Vaux (for ASF/APE management or WoL) in the shared
 * status word, then — only if no other function's driver is present —
 * actually switch the board to Vaux or power down on Vmain.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is still loaded; leave the power
	 * source decision to it.
	 */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2806
/* Decide whether the board must remain on auxiliary power (for ASF or,
 * when include_wol is set, Wake-on-LAN) — also consulting a dual-port
 * peer device if present — and switch the power source accordingly.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* The peer's driver owns the decision while it
			 * is fully initialized.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2850
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002851static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2852{
2853 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2854 return 1;
Matt Carlson79eb6902010-02-17 15:17:03 +00002855 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002856 if (speed != SPEED_10)
2857 return 1;
2858 } else if (speed == SPEED_10)
2859 return 1;
2860
2861 return 0;
2862}
2863
/* Quiesce the PHY before the device powers down.  The procedure varies
 * by PHY type: serdes parts only touch the SG_DIG/SERDES config, 5906
 * puts its ethernet PHY into IDDQ, FET-style PHYs use shadow-register
 * powerdown, and everything else is powered down via BMCR_PDOWN unless
 * the chip has a known powerdown erratum.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Reset the PHY, then park it in IDDQ powerdown. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set the standby-power-down bit through the
			 * shadow register window, then restore it.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Force the gigabit MAC clock down to 12.5MHz first. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2938
Matt Carlson3f007892008-11-03 16:51:36 -08002939/* tp->lock is held. */
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002940static int tg3_nvram_lock(struct tg3 *tp)
2941{
Joe Perches63c3a662011-04-26 08:12:10 +00002942 if (tg3_flag(tp, NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002943 int i;
2944
2945 if (tp->nvram_lock_cnt == 0) {
2946 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2947 for (i = 0; i < 8000; i++) {
2948 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2949 break;
2950 udelay(20);
2951 }
2952 if (i == 8000) {
2953 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2954 return -ENODEV;
2955 }
2956 }
2957 tp->nvram_lock_cnt++;
2958 }
2959 return 0;
2960}
2961
2962/* tp->lock is held. */
2963static void tg3_nvram_unlock(struct tg3 *tp)
2964{
Joe Perches63c3a662011-04-26 08:12:10 +00002965 if (tg3_flag(tp, NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002966 if (tp->nvram_lock_cnt > 0)
2967 tp->nvram_lock_cnt--;
2968 if (tp->nvram_lock_cnt == 0)
2969 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2970 }
2971}
2972
2973/* tp->lock is held. */
2974static void tg3_enable_nvram_access(struct tg3 *tp)
2975{
Joe Perches63c3a662011-04-26 08:12:10 +00002976 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002977 u32 nvaccess = tr32(NVRAM_ACCESS);
2978
2979 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2980 }
2981}
2982
2983/* tp->lock is held. */
2984static void tg3_disable_nvram_access(struct tg3 *tp)
2985{
Joe Perches63c3a662011-04-26 08:12:10 +00002986 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00002987 u32 nvaccess = tr32(NVRAM_ACCESS);
2988
2989 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2990 }
2991}
2992
/* Read one 32-bit word through the legacy SEEPROM interface (used when
 * the NVRAM flag is not set).  offset must be word-aligned and within
 * the EEPROM address range.  Returns 0 on success, -EINVAL for a bad
 * offset, or -EBUSY if the read never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3032
/* Maximum number of 10us polls for an NVRAM command to complete. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue a command to the NVRAM interface and poll for NVRAM_CMD_DONE.
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
3053
3054static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3055{
Joe Perches63c3a662011-04-26 08:12:10 +00003056 if (tg3_flag(tp, NVRAM) &&
3057 tg3_flag(tp, NVRAM_BUFFERED) &&
3058 tg3_flag(tp, FLASH) &&
3059 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003060 (tp->nvram_jedecnum == JEDEC_ATMEL))
3061
3062 addr = ((addr / tp->nvram_pagesize) <<
3063 ATMEL_AT45DB0X1B_PAGE_POS) +
3064 (addr % tp->nvram_pagesize);
3065
3066 return addr;
3067}
3068
3069static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3070{
Joe Perches63c3a662011-04-26 08:12:10 +00003071 if (tg3_flag(tp, NVRAM) &&
3072 tg3_flag(tp, NVRAM_BUFFERED) &&
3073 tg3_flag(tp, FLASH) &&
3074 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003075 (tp->nvram_jedecnum == JEDEC_ATMEL))
3076
3077 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3078 tp->nvram_pagesize) +
3079 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3080
3081 return addr;
3082}
3083
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Fall back to the legacy SEEPROM interface on parts
	 * without NVRAM.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate for the interface, enable access, issue a
	 * single-word read, then undo both in reverse order.
	 */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3121
Matt Carlsona9dc5292009-02-25 14:25:30 +00003122/* Ensures NVRAM data is in bytestream format. */
3123static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003124{
3125 u32 v;
Matt Carlsona9dc5292009-02-25 14:25:30 +00003126 int res = tg3_nvram_read(tp, offset, &v);
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003127 if (!res)
Matt Carlsona9dc5292009-02-25 14:25:30 +00003128 *val = cpu_to_be32(v);
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003129 return res;
3130}
3131
/* Write @len bytes from @buf at @offset through the legacy SEEPROM
 * interface, one 32-bit word at a time. Each word write is polled for
 * EEPROM_ADDR_COMPLETE (up to ~1s). Returns 0 on success or -EBUSY if
 * a word write never signals completion. Caller guarantees dword
 * alignment of offset and len.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any stale completion before starting this write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion of this word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3180
/* offset and length are dword aligned */
/* Write to unbuffered flash via read-modify-write of whole pages:
 * read the page containing @offset into a scratch buffer, merge the
 * new bytes, erase the page, then program it back word by word.
 * A WRDI (write-disable) is always issued on exit.
 *
 * NOTE(review): @buf is never advanced inside the while loop, so a
 * write spanning multiple pages would repeat the first chunk of @buf;
 * also, when page_off > 0 and len >= pagesize, size stays pagesize and
 * the memcpy into tmp + page_off would overrun the page-sized scratch
 * buffer. Presumably callers only issue single-page writes here —
 * confirm before relying on multi-page behavior.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		/* Read the existing page contents into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, one word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3279
/* offset and length are dword aligned */
/* Write to buffered flash (or plain EEPROM) one word at a time,
 * marking FIRST at each page boundary and LAST at page end / end of
 * the request. On ST-JEDEC parts (pre-5752/5755) each page burst is
 * preceded by an explicit WREN command. Returns 0 or the first error
 * from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ buffered flash auto-increments the address;
		 * only reload NVRAM_ADDR when starting a new burst.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3334
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily lifts the
 * EEPROM write-protect GPIO (if present), dispatches to the
 * legacy-SEEPROM, buffered, or unbuffered write path, then restores
 * write protection.
 *
 * NOTE(review): the early `return ret` on tg3_nvram_lock() failure
 * skips re-asserting GRC_LOCAL_CTRL, leaving write-protect dropped
 * on that error path — confirm whether intentional.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop the write-protect GPIO for the duration of the write. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes at the GRC level while we work. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the write-protect GPIO. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3384
Matt Carlson997b4f12011-08-31 11:44:53 +00003385#define RX_CPU_SCRATCH_BASE 0x30000
3386#define RX_CPU_SCRATCH_SIZE 0x04000
3387#define TX_CPU_SCRATCH_BASE 0x34000
3388#define TX_CPU_SCRATCH_SIZE 0x04000
3389
3390/* tp->lock is held. */
3391static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3392{
3393 int i;
3394
3395 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3396
3397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3398 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3399
3400 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3401 return 0;
3402 }
3403 if (offset == RX_CPU_BASE) {
3404 for (i = 0; i < 10000; i++) {
3405 tw32(offset + CPU_STATE, 0xffffffff);
3406 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3407 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3408 break;
3409 }
3410
3411 tw32(offset + CPU_STATE, 0xffffffff);
3412 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3413 udelay(10);
3414 } else {
3415 for (i = 0; i < 10000; i++) {
3416 tw32(offset + CPU_STATE, 0xffffffff);
3417 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3418 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3419 break;
3420 }
3421 }
3422
3423 if (i >= 10000) {
3424 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3425 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3426 return -ENODEV;
3427 }
3428
3429 /* Clear firmware's nvram arbitration. */
3430 if (tg3_flag(tp, NVRAM))
3431 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3432 return 0;
3433}
3434
/* Firmware image descriptor parsed out of tp->fw; see
 * tg3_load_5701_a0_firmware_fix() and tg3_load_tso_firmware().
 */
struct fw_info {
	unsigned int fw_base;		/* CPU start address of the image */
	unsigned int fw_len;		/* image length in bytes (fw->size - 12) */
	const __be32 *fw_data;		/* image words, stored big-endian */
};
3440
/* tp->lock is held. */
/* Halt @cpu_base and copy info->fw_data into its scratch memory at
 * @cpu_scratch_base. Does not restart the CPU — callers set CPU_PC
 * and release the halt themselves. Returns 0 or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ parts have no loadable TX CPU (enforced here and by the
	 * BUG_ON in tg3_halt_cpu()).
	 */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, keep the CPU halted, then copy the
	 * firmware words in host order.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3486
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both CPU scratch areas,
 * then start only the RX CPU and verify its PC latched at fw_base.
 * Returns 0 or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry the PC write a few times until it sticks. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);

	return 0;
}
3541
/* tp->lock is held. */
/* Load the software-TSO firmware into the appropriate CPU (RX CPU on
 * 5705 using the MBUF pool as scratch, TX CPU otherwise) and start it.
 * A no-op (returns 0) on parts with hardware TSO.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry the PC write a few times until it sticks. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
3605
3606
/* tp->lock is held. */
/* Program the device's MAC address registers from tp->dev->dev_addr:
 * all four MAC_ADDR_* slots (slot 1 optionally skipped via
 * @skip_mac_1), the twelve extended slots on 5703/5704, and the TX
 * backoff seed derived from the address bytes.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte MAC into the high (2-byte) and low (4-byte)
	 * register halves.
	 */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] << 8) |
		    (tp->dev->dev_addr[5] << 0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff generator from the address byte sum. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		    TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3643
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 *
	 * Rewrites the PCI MISC_HOST_CTRL config register from the value
	 * cached in tp->misc_host_ctrl.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3653
3654static int tg3_power_up(struct tg3 *tp)
3655{
Matt Carlsonbed98292011-07-13 09:27:29 +00003656 int err;
3657
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003658 tg3_enable_register_access(tp);
3659
Matt Carlsonbed98292011-07-13 09:27:29 +00003660 err = pci_set_power_state(tp->pdev, PCI_D0);
3661 if (!err) {
3662 /* Switch out of Vaux if it is a NIC */
3663 tg3_pwrsrc_switch_to_vmain(tp);
3664 } else {
3665 netdev_err(tp->dev, "Transition to D0 failed\n");
3666 }
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003667
Matt Carlsonbed98292011-07-13 09:27:29 +00003668 return err;
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003669}
3670
Matt Carlson4b409522012-02-13 10:20:11 +00003671static int tg3_setup_phy(struct tg3 *, int);
3672
/* Prepare the chip for low power / shutdown: mask PCI interrupts,
 * drop the link to a WoL-capable speed, arm the WoL mailbox and MAC
 * magic-packet mode when wake is enabled, gate clocks/PLLs per ASIC
 * revision, power down the PHY when nothing needs it, and hand off
 * auxiliary power. Always returns 0. Ordering of the register writes
 * below follows the hardware bring-down sequence — do not reorder.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while the device is being brought down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link parameters so they can be
			 * restored on power-up.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Renegotiate down to a low-power link speed. */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families need the MAC-level
			 * low-power handling below as well.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for firmware to signal readiness via
		 * the ASF status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC to keep receiving so magic packets can
		 * be detected while suspended.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating / PLL power-down, per ASIC generation. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch, each write flushed and settled. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when neither wake-up nor ASF needs it. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Grab the NVRAM lock before halting the RX CPU, as
			 * bootcode may still be using the interface.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3902
/* Final power-down: run the shutdown preparation sequence, then arm
 * PME (only if WoL is enabled) and place the device in PCI D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3910
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3912{
3913 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3914 case MII_TG3_AUX_STAT_10HALF:
3915 *speed = SPEED_10;
3916 *duplex = DUPLEX_HALF;
3917 break;
3918
3919 case MII_TG3_AUX_STAT_10FULL:
3920 *speed = SPEED_10;
3921 *duplex = DUPLEX_FULL;
3922 break;
3923
3924 case MII_TG3_AUX_STAT_100HALF:
3925 *speed = SPEED_100;
3926 *duplex = DUPLEX_HALF;
3927 break;
3928
3929 case MII_TG3_AUX_STAT_100FULL:
3930 *speed = SPEED_100;
3931 *duplex = DUPLEX_FULL;
3932 break;
3933
3934 case MII_TG3_AUX_STAT_1000HALF:
3935 *speed = SPEED_1000;
3936 *duplex = DUPLEX_HALF;
3937 break;
3938
3939 case MII_TG3_AUX_STAT_1000FULL:
3940 *speed = SPEED_1000;
3941 *duplex = DUPLEX_FULL;
3942 break;
3943
3944 default:
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003945 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
Michael Chan715116a2006-09-27 16:09:25 -07003946 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3947 SPEED_10;
3948 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3949 DUPLEX_HALF;
3950 break;
3951 }
Matt Carlsone7405222012-02-13 15:20:16 +00003952 *speed = SPEED_UNKNOWN;
3953 *duplex = DUPLEX_UNKNOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956}
3957
/* Program the PHY's autoneg advertisement registers from the ethtool
 * @advertise mask and @flowctrl pause bits: MII_ADVERTISE always,
 * MII_CTRL1000 unless the port is 10/100-only, plus the EEE
 * advertisement (and per-ASIC DSP fixups) on EEE-capable parts.
 * Returns 0 or the first PHY write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 must advertise as gigabit master. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reprogrammed. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Preserve the first error, but still close SMDSP access. */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4030
4031static void tg3_phy_copper_begin(struct tg3 *tp)
4032{
Matt Carlsond13ba512012-02-22 12:35:19 +00004033 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4034 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4035 u32 adv, fc;
Matt Carlson42b64a42011-05-19 12:12:49 +00004036
Matt Carlsond13ba512012-02-22 12:35:19 +00004037 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4038 adv = ADVERTISED_10baseT_Half |
4039 ADVERTISED_10baseT_Full;
4040 if (tg3_flag(tp, WOL_SPEED_100MB))
4041 adv |= ADVERTISED_100baseT_Half |
4042 ADVERTISED_100baseT_Full;
Matt Carlson42b64a42011-05-19 12:12:49 +00004043
Matt Carlsond13ba512012-02-22 12:35:19 +00004044 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
Matt Carlson42b64a42011-05-19 12:12:49 +00004045 } else {
Matt Carlsond13ba512012-02-22 12:35:19 +00004046 adv = tp->link_config.advertising;
4047 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4048 adv &= ~(ADVERTISED_1000baseT_Half |
4049 ADVERTISED_1000baseT_Full);
4050
4051 fc = tp->link_config.flowctrl;
Matt Carlson42b64a42011-05-19 12:12:49 +00004052 }
4053
Matt Carlsond13ba512012-02-22 12:35:19 +00004054 tg3_phy_autoneg_cfg(tp, adv, fc);
Matt Carlson52b02d02010-10-14 10:37:41 +00004055
Matt Carlsond13ba512012-02-22 12:35:19 +00004056 tg3_writephy(tp, MII_BMCR,
4057 BMCR_ANENABLE | BMCR_ANRESTART);
4058 } else {
4059 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 u32 bmcr, orig_bmcr;
4061
4062 tp->link_config.active_speed = tp->link_config.speed;
4063 tp->link_config.active_duplex = tp->link_config.duplex;
4064
4065 bmcr = 0;
4066 switch (tp->link_config.speed) {
4067 default:
4068 case SPEED_10:
4069 break;
4070
4071 case SPEED_100:
4072 bmcr |= BMCR_SPEED100;
4073 break;
4074
4075 case SPEED_1000:
Matt Carlson221c5632011-06-13 13:39:01 +00004076 bmcr |= BMCR_SPEED1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004078 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079
4080 if (tp->link_config.duplex == DUPLEX_FULL)
4081 bmcr |= BMCR_FULLDPLX;
4082
4083 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4084 (bmcr != orig_bmcr)) {
4085 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4086 for (i = 0; i < 1500; i++) {
4087 u32 tmp;
4088
4089 udelay(10);
4090 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4091 tg3_readphy(tp, MII_BMSR, &tmp))
4092 continue;
4093 if (!(tmp & BMSR_LSTATUS)) {
4094 udelay(40);
4095 break;
4096 }
4097 }
4098 tg3_writephy(tp, MII_BMCR, bmcr);
4099 udelay(40);
4100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 }
4102}
4103
4104static int tg3_init_5401phy_dsp(struct tg3 *tp)
4105{
4106 int err;
4107
4108 /* Turn off tap power management. */
4109 /* Set Extended packet length bit */
Matt Carlsonb4bd2922011-04-20 07:57:41 +00004110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111
Matt Carlson6ee7c0a2010-08-02 11:26:04 +00004112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4114 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4115 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4116 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117
4118 udelay(40);
4119
4120 return err;
4121}
4122
/* Check whether the PHY's autoneg advertisement registers already
 * match what the driver wants to advertise.
 *
 * Compares MII_ADVERTISE (and, on gigabit-capable PHYs, MII_CTRL1000)
 * against the values derived from tp->link_config.  On return *lcladv
 * holds the raw MII_ADVERTISE value read from the PHY.
 *
 * Returns true when everything matches, false on any PHY read failure
 * or mismatch (meaning autoneg must be reconfigured/restarted).
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	/* Expected 10/100 advertisement, in MII register format. */
	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits only matter in full duplex. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		/* Expected 1000BASE-T advertisement. */
		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			/* 5701 A0/B0 are configured as forced master
			 * (see tg3_phy_autoneg_cfg), so expect the
			 * master bits to be set as well.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4166
Matt Carlson859edb22011-12-08 14:40:16 +00004167static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4168{
4169 u32 lpeth = 0;
4170
4171 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4172 u32 val;
4173
4174 if (tg3_readphy(tp, MII_STAT1000, &val))
4175 return false;
4176
4177 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4178 }
4179
4180 if (tg3_readphy(tp, MII_LPA, rmtadv))
4181 return false;
4182
4183 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4184 tp->link_config.rmt_adv = lpeth;
4185
4186 return true;
4187}
4188
/* Bring up / re-evaluate the link on a copper PHY.
 *
 * Quiesces MAC link-event signaling, optionally resets the PHY, runs
 * per-chip PHY workarounds, polls for link, derives speed/duplex from
 * the AUX status register, validates the autoneg (or forced) result,
 * then reprograms MAC_MODE and related registers to match and updates
 * the net_device carrier state.
 *
 * @force_reset: non-zero forces a tg3_phy_reset() before setup.
 * Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC event generation and clear latched link-status
	 * change bits while we rework the link.
	 */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MII auto-polling so the manual PHY accesses below
	 * have the MDIO bus to themselves.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Clear the power-control shadow register. */
	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is latched; read twice to get current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		/* No link: reload the 5401 DSP patch and wait up to
		 * ~10ms for link to come back.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit that still has no link
			 * gets a full reset plus DSP reload.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts (ISTAT is read-to-clear; read
	 * twice to be sure).
	 */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only the link-change interrupt when using MI
	 * interrupts; otherwise mask everything (except on FET PHYs,
	 * which are left alone).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* NOTE(review): bit 10 of the MISCTEST shadow register
		 * appears to be required for capacitively-coupled
		 * links; if it is clear we set it and skip straight to
		 * the relink path.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for a non-zero AUX status, then
		 * decode speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a stable, sane BMCR value (0x7fff shows up
		 * transiently on some PHYs).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg result is only accepted when the PHY
			 * advertisement matches our configuration and
			 * the partner's abilities can be read.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: link is good only if the PHY is
			 * actually in the forced configuration we want.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X status (register location
			 * differs on FET PHYs).
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	/* No usable link (or low-power mode): restart link bring-up
	 * and re-check once.
	 */
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: re-ack status
	 * and poke the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption: disable PCIe CLKREQ at 10/100,
	 * enable it otherwise.
	 */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	/* Propagate the final link state to the stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
4473
/* Software state for the 1000BASE-X autonegotiation state machine
 * implemented in tg3_fiber_aneg_smachine().  One instance lives on
 * the stack of fiber_autoneg() for the duration of a negotiation.
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* control/status bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
	/* MR_LP_ADV_* reflect what the link partner advertised. */
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;	/* measured in state-machine ticks */

	u32 ability_match_cfg;		/* config word being debounced */
	int ability_match_count;	/* consecutive identical sightings */

	char ability_match, idle_match, ack_match;	/* debounced match flags */

	u32 txconfig, rxconfig;		/* raw tx/rx config code words */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06	/* bits that must never be set */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME	10000
/* Single step of the software 1000BASE-X autonegotiation state
 * machine.  Called repeatedly (about once per microsecond) by
 * fiber_autoneg() with @ap holding all persistent state.
 *
 * Each call first samples/debounces the received config word from the
 * MAC, then advances ap->state.  Returns ANEG_OK to keep stepping,
 * ANEG_TIMER_ENAB when a settle timer is running, ANEG_DONE on
 * completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear all persistent state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and debounce it:
	 * ability_match is only set once the same non-idle word has
	 * been seen more than once in a row.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* Idle on the wire: reset all match tracking. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start sending an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-idle config word from the
		 * partner.
		 */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner's word must not have changed (modulo
			 * the ACK bit) or we restart negotiation.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertisement into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not supported
				 * unless neither side has a page to
				 * send.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4789
Matt Carlson5be73b42007-12-20 20:09:29 -08004790static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791{
4792 int res = 0;
4793 struct tg3_fiber_aneginfo aninfo;
4794 int status = ANEG_FAILED;
4795 unsigned int tick;
4796 u32 tmp;
4797
4798 tw32_f(MAC_TX_AUTO_NEG, 0);
4799
4800 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4801 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4802 udelay(40);
4803
4804 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4805 udelay(40);
4806
4807 memset(&aninfo, 0, sizeof(aninfo));
4808 aninfo.flags |= MR_AN_ENABLE;
4809 aninfo.state = ANEG_STATE_UNKNOWN;
4810 aninfo.cur_time = 0;
4811 tick = 0;
4812 while (++tick < 195000) {
4813 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4814 if (status == ANEG_DONE || status == ANEG_FAILED)
4815 break;
4816
4817 udelay(1);
4818 }
4819
4820 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4821 tw32_f(MAC_MODE, tp->mac_mode);
4822 udelay(40);
4823
Matt Carlson5be73b42007-12-20 20:09:29 -08004824 *txflags = aninfo.txconfig;
4825 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826
4827 if (status == ANEG_DONE &&
4828 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4829 MR_LP_ADV_FULL_DUPLEX)))
4830 res = 1;
4831
4832 return res;
4833}
4834
4835static void tg3_init_bcm8002(struct tg3 *tp)
4836{
4837 u32 mac_status = tr32(MAC_STATUS);
4838 int i;
4839
4840 /* Reset when initting first time or we have a link. */
Joe Perches63c3a662011-04-26 08:12:10 +00004841 if (tg3_flag(tp, INIT_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842 !(mac_status & MAC_STATUS_PCS_SYNCED))
4843 return;
4844
4845 /* Set PLL lock range. */
4846 tg3_writephy(tp, 0x16, 0x8007);
4847
4848 /* SW reset */
4849 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4850
4851 /* Wait for reset to complete. */
4852 /* XXX schedule_timeout() ... */
4853 for (i = 0; i < 500; i++)
4854 udelay(10);
4855
4856 /* Config mode; select PMA/Ch 1 regs. */
4857 tg3_writephy(tp, 0x10, 0x8411);
4858
4859 /* Enable auto-lock and comdet, select txclk for tx. */
4860 tg3_writephy(tp, 0x11, 0x0a10);
4861
4862 tg3_writephy(tp, 0x18, 0x00a0);
4863 tg3_writephy(tp, 0x16, 0x41ff);
4864
4865 /* Assert and deassert POR. */
4866 tg3_writephy(tp, 0x13, 0x0400);
4867 udelay(40);
4868 tg3_writephy(tp, 0x13, 0x0000);
4869
4870 tg3_writephy(tp, 0x11, 0x0a50);
4871 udelay(40);
4872 tg3_writephy(tp, 0x11, 0x0a10);
4873
4874 /* Wait for signal to stabilize */
4875 /* XXX schedule_timeout() ... */
4876 for (i = 0; i < 15000; i++)
4877 udelay(10);
4878
4879 /* Deselect the channel register so we can read the PHYID
4880 * later.
4881 */
4882 tg3_writephy(tp, 0x10, 0x8011);
4883}
4884
/* Drive link setup for fiber ports whose 1000BASE-X autoneg is handled
 * by the SG_DIG hardware block (e.g. 5704S).
 *
 * @tp:         device private state
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller
 *
 * Returns 1 if the link should be considered up, 0 otherwise.
 * Side effects: programs SG_DIG_CTRL/MAC_SERDES_CFG, updates
 * tp->serdes_counter, tp->phy_flags (parallel-detect bit),
 * tp->link_config.rmt_adv and the negotiated flow-control settings.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All chips except the 5704 A0/A1 steppings need the SERDES_CFG
	 * workaround writes below when (re)starting autoneg.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active,
		 * then report link up purely on PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Link came up via parallel detection: keep it up while
		 * we still see PCS sync without config code words and the
		 * timeout counter has not expired.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the desired autoneg setup. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive pause settings from what
			 * we advertised and what the partner reported.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg still pending: count down, then fall back
			 * to parallel detection once the timeout expires.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5029
/* Drive link setup for fiber ports where 1000BASE-X autoneg is run in
 * software (fiber_autoneg()) rather than by the SG_DIG hardware block.
 *
 * @tp:         device private state
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller
 *
 * Returns 1 if the link should be considered up, 0 otherwise.
 * Side effects: may program MAC_MODE/MAC_STATUS, and updates
 * tp->link_config.rmt_adv plus the negotiated flow-control settings.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			/* Translate the raw autoneg config words into MII
			 * advertisement bits for flow-control resolution.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until the status settles
		 * (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but accept the link via parallel
		 * detection: PCS sync with no config code words.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config code words, then return to normal. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5094
/* Top-level link setup for TBI/fiber ports.  Reprograms the MAC for TBI
 * mode, runs autoneg via hardware (tg3_setup_fiber_hw_autoneg) or software
 * (tg3_setup_fiber_by_hand), then updates carrier state, LEDs and
 * link_config to match the outcome.
 *
 * @tp:          device private state
 * @force_reset: unused on this path (kept for signature parity with the
 *               copper/MII variants) -- NOTE(review): confirm against callers
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember prior state so we only report changes at the end. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: SW-autoneg port already initialized and carrier up
	 * with a clean status -- just ack events and bail out.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any stale link-change indication in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack pending sync/config/link events until quiet (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * nudge the partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber link is always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes; report even if only speed/duplex/
	 * pause settings changed while carrier stayed the same.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5203
Michael Chan747e8f82005-07-25 12:33:22 -07005204static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5205{
5206 int current_link_up, err = 0;
5207 u32 bmsr, bmcr;
5208 u16 current_speed;
5209 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08005210 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07005211
5212 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5213 tw32_f(MAC_MODE, tp->mac_mode);
5214 udelay(40);
5215
5216 tw32(MAC_EVENT, 0);
5217
5218 tw32_f(MAC_STATUS,
5219 (MAC_STATUS_SYNC_CHANGED |
5220 MAC_STATUS_CFG_CHANGED |
5221 MAC_STATUS_MI_COMPLETION |
5222 MAC_STATUS_LNKSTATE_CHANGED));
5223 udelay(40);
5224
5225 if (force_reset)
5226 tg3_phy_reset(tp);
5227
5228 current_link_up = 0;
Matt Carlsone7405222012-02-13 15:20:16 +00005229 current_speed = SPEED_UNKNOWN;
5230 current_duplex = DUPLEX_UNKNOWN;
Matt Carlson859edb22011-12-08 14:40:16 +00005231 tp->link_config.rmt_adv = 0;
Michael Chan747e8f82005-07-25 12:33:22 -07005232
5233 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5234 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08005235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5236 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5237 bmsr |= BMSR_LSTATUS;
5238 else
5239 bmsr &= ~BMSR_LSTATUS;
5240 }
Michael Chan747e8f82005-07-25 12:33:22 -07005241
5242 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5243
5244 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005245 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07005246 /* do nothing, just check for link up at the end */
5247 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson28011cf2011-11-16 18:36:59 -05005248 u32 adv, newadv;
Michael Chan747e8f82005-07-25 12:33:22 -07005249
5250 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
Matt Carlson28011cf2011-11-16 18:36:59 -05005251 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5252 ADVERTISE_1000XPAUSE |
5253 ADVERTISE_1000XPSE_ASYM |
5254 ADVERTISE_SLCT);
Michael Chan747e8f82005-07-25 12:33:22 -07005255
Matt Carlson28011cf2011-11-16 18:36:59 -05005256 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Matt Carlson37f07022011-11-17 14:30:55 +00005257 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
Michael Chan747e8f82005-07-25 12:33:22 -07005258
Matt Carlson28011cf2011-11-16 18:36:59 -05005259 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5260 tg3_writephy(tp, MII_ADVERTISE, newadv);
Michael Chan747e8f82005-07-25 12:33:22 -07005261 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5262 tg3_writephy(tp, MII_BMCR, bmcr);
5263
5264 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07005265 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005266 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
Michael Chan747e8f82005-07-25 12:33:22 -07005267
5268 return err;
5269 }
5270 } else {
5271 u32 new_bmcr;
5272
5273 bmcr &= ~BMCR_SPEED1000;
5274 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5275
5276 if (tp->link_config.duplex == DUPLEX_FULL)
5277 new_bmcr |= BMCR_FULLDPLX;
5278
5279 if (new_bmcr != bmcr) {
5280 /* BMCR_SPEED1000 is a reserved bit that needs
5281 * to be set on write.
5282 */
5283 new_bmcr |= BMCR_SPEED1000;
5284
5285 /* Force a linkdown */
5286 if (netif_carrier_ok(tp->dev)) {
5287 u32 adv;
5288
5289 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5290 adv &= ~(ADVERTISE_1000XFULL |
5291 ADVERTISE_1000XHALF |
5292 ADVERTISE_SLCT);
5293 tg3_writephy(tp, MII_ADVERTISE, adv);
5294 tg3_writephy(tp, MII_BMCR, bmcr |
5295 BMCR_ANRESTART |
5296 BMCR_ANENABLE);
5297 udelay(10);
5298 netif_carrier_off(tp->dev);
5299 }
5300 tg3_writephy(tp, MII_BMCR, new_bmcr);
5301 bmcr = new_bmcr;
5302 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5303 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08005304 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5305 ASIC_REV_5714) {
5306 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5307 bmsr |= BMSR_LSTATUS;
5308 else
5309 bmsr &= ~BMSR_LSTATUS;
5310 }
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005311 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
Michael Chan747e8f82005-07-25 12:33:22 -07005312 }
5313 }
5314
5315 if (bmsr & BMSR_LSTATUS) {
5316 current_speed = SPEED_1000;
5317 current_link_up = 1;
5318 if (bmcr & BMCR_FULLDPLX)
5319 current_duplex = DUPLEX_FULL;
5320 else
5321 current_duplex = DUPLEX_HALF;
5322
Matt Carlsonef167e22007-12-20 20:10:01 -08005323 local_adv = 0;
5324 remote_adv = 0;
5325
Michael Chan747e8f82005-07-25 12:33:22 -07005326 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08005327 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07005328
5329 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5330 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5331 common = local_adv & remote_adv;
5332 if (common & (ADVERTISE_1000XHALF |
5333 ADVERTISE_1000XFULL)) {
5334 if (common & ADVERTISE_1000XFULL)
5335 current_duplex = DUPLEX_FULL;
5336 else
5337 current_duplex = DUPLEX_HALF;
Matt Carlson859edb22011-12-08 14:40:16 +00005338
5339 tp->link_config.rmt_adv =
5340 mii_adv_to_ethtool_adv_x(remote_adv);
Joe Perches63c3a662011-04-26 08:12:10 +00005341 } else if (!tg3_flag(tp, 5780_CLASS)) {
Matt Carlson57d8b882010-06-05 17:24:35 +00005342 /* Link is up via parallel detect */
Matt Carlson859a588792010-04-05 10:19:28 +00005343 } else {
Michael Chan747e8f82005-07-25 12:33:22 -07005344 current_link_up = 0;
Matt Carlson859a588792010-04-05 10:19:28 +00005345 }
Michael Chan747e8f82005-07-25 12:33:22 -07005346 }
5347 }
5348
Matt Carlsonef167e22007-12-20 20:10:01 -08005349 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5350 tg3_setup_flow_control(tp, local_adv, remote_adv);
5351
Michael Chan747e8f82005-07-25 12:33:22 -07005352 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5353 if (tp->link_config.active_duplex == DUPLEX_HALF)
5354 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5355
5356 tw32_f(MAC_MODE, tp->mac_mode);
5357 udelay(40);
5358
5359 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5360
5361 tp->link_config.active_speed = current_speed;
5362 tp->link_config.active_duplex = current_duplex;
5363
5364 if (current_link_up != netif_carrier_ok(tp->dev)) {
5365 if (current_link_up)
5366 netif_carrier_on(tp->dev);
5367 else {
5368 netif_carrier_off(tp->dev);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005369 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
Michael Chan747e8f82005-07-25 12:33:22 -07005370 }
5371 tg3_link_report(tp);
5372 }
5373 return err;
5374}
5375
/* Periodic poller that manages parallel-detect link for MII-serdes ports.
 *
 * While tp->serdes_counter is nonzero, autoneg is still being given time
 * and we only decrement the counter.  Otherwise:
 *   - link down + autoneg enabled: if we see signal detect but no config
 *     code words, force 1000FD up by parallel detection;
 *   - link up via parallel detect: if config code words start arriving,
 *     re-enable autoneg so the partner's wishes are honored.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read: first clears latched state --
			 * NOTE(review): presumed, confirm with PHY docs.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5435
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5437{
Matt Carlsonf2096f92011-04-05 14:22:48 +00005438 u32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005439 int err;
5440
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005441 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442 err = tg3_setup_fiber_phy(tp, force_reset);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005443 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
Michael Chan747e8f82005-07-25 12:33:22 -07005444 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Matt Carlson859a588792010-04-05 10:19:28 +00005445 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 err = tg3_setup_copper_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447
Matt Carlsonbcb37f62008-11-03 16:52:09 -08005448 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonf2096f92011-04-05 14:22:48 +00005449 u32 scale;
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08005450
5451 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5452 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5453 scale = 65;
5454 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5455 scale = 6;
5456 else
5457 scale = 12;
5458
5459 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5460 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5461 tw32(GRC_MISC_CFG, val);
5462 }
5463
Matt Carlsonf2096f92011-04-05 14:22:48 +00005464 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5465 (6 << TX_LENGTHS_IPG_SHIFT);
5466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5467 val |= tr32(MAC_TX_LENGTHS) &
5468 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5469 TX_LENGTHS_CNT_DWN_VAL_MSK);
5470
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 if (tp->link_config.active_speed == SPEED_1000 &&
5472 tp->link_config.active_duplex == DUPLEX_HALF)
Matt Carlsonf2096f92011-04-05 14:22:48 +00005473 tw32(MAC_TX_LENGTHS, val |
5474 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475 else
Matt Carlsonf2096f92011-04-05 14:22:48 +00005476 tw32(MAC_TX_LENGTHS, val |
5477 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478
Joe Perches63c3a662011-04-26 08:12:10 +00005479 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480 if (netif_carrier_ok(tp->dev)) {
5481 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07005482 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005483 } else {
5484 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5485 }
5486 }
5487
Joe Perches63c3a662011-04-26 08:12:10 +00005488 if (tg3_flag(tp, ASPM_WORKAROUND)) {
Matt Carlsonf2096f92011-04-05 14:22:48 +00005489 val = tr32(PCIE_PWR_MGMT_THRESH);
Matt Carlson8ed5d972007-05-07 00:25:49 -07005490 if (!netif_carrier_ok(tp->dev))
5491 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5492 tp->pwrmgmt_thresh;
5493 else
5494 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5495 tw32(PCIE_PWR_MGMT_THRESH, val);
5496 }
5497
Linus Torvalds1da177e2005-04-16 15:20:36 -07005498 return err;
5499}
5500
Matt Carlson66cfd1b2010-09-30 10:34:30 +00005501static inline int tg3_irq_sync(struct tg3 *tp)
5502{
5503 return tp->irq_sync;
5504}
5505
Matt Carlson97bd8e42011-04-13 11:05:04 +00005506static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5507{
5508 int i;
5509
5510 dst = (u32 *)((u8 *)dst + off);
5511 for (i = 0; i < len; i += sizeof(u32))
5512 *dst++ = tr32(off + i);
5513}
5514
/* Fill @regs with a dump of the legacy (non-PCIe-private) register
 * blocks.  Each tg3_rd32_loop() call mirrors one functional block at its
 * native offset inside the buffer; conditional blocks depend on device
 * capability flags (MSI-X vectors, TX CPU on pre-5705, NVRAM).
 * Caller supplies a TG3_REG_BLK_SIZE buffer (see tg3_dump_state()).
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X support. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The separate TX CPU only exists on pre-5705 devices. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5564
5565static void tg3_dump_state(struct tg3 *tp)
5566{
5567 int i;
5568 u32 *regs;
5569
5570 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5571 if (!regs) {
5572 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5573 return;
5574 }
5575
Joe Perches63c3a662011-04-26 08:12:10 +00005576 if (tg3_flag(tp, PCI_EXPRESS)) {
Matt Carlson97bd8e42011-04-13 11:05:04 +00005577 /* Read up to but not including private PCI registers */
5578 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5579 regs[i / sizeof(u32)] = tr32(i);
5580 } else
5581 tg3_dump_legacy_regs(tp, regs);
5582
5583 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5584 if (!regs[i + 0] && !regs[i + 1] &&
5585 !regs[i + 2] && !regs[i + 3])
5586 continue;
5587
5588 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5589 i * 4,
5590 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5591 }
5592
5593 kfree(regs);
5594
5595 for (i = 0; i < tp->irq_cnt; i++) {
5596 struct tg3_napi *tnapi = &tp->napi[i];
5597
5598 /* SW status block */
5599 netdev_err(tp->dev,
5600 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5601 i,
5602 tnapi->hw_status->status,
5603 tnapi->hw_status->status_tag,
5604 tnapi->hw_status->rx_jumbo_consumer,
5605 tnapi->hw_status->rx_consumer,
5606 tnapi->hw_status->rx_mini_consumer,
5607 tnapi->hw_status->idx[0].rx_producer,
5608 tnapi->hw_status->idx[0].tx_consumer);
5609
5610 netdev_err(tp->dev,
5611 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5612 i,
5613 tnapi->last_tag, tnapi->last_irq_tag,
5614 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5615 tnapi->rx_rcb_ptr,
5616 tnapi->prodring.rx_std_prod_idx,
5617 tnapi->prodring.rx_std_cons_idx,
5618 tnapi->prodring.rx_jmb_prod_idx,
5619 tnapi->prodring.rx_jmb_cons_idx);
5620 }
5621}
5622
Michael Chandf3e6542006-05-26 17:48:07 -07005623/* This is called whenever we suspect that the system chipset is re-
5624 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5625 * is bogus tx completions. We try to recover by setting the
5626 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5627 * in the workqueue.
5628 */
5629static void tg3_tx_recover(struct tg3 *tp)
5630{
Joe Perches63c3a662011-04-26 08:12:10 +00005631 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
Michael Chandf3e6542006-05-26 17:48:07 -07005632 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5633
Matt Carlson5129c3a2010-04-05 10:19:23 +00005634 netdev_warn(tp->dev,
5635 "The system may be re-ordering memory-mapped I/O "
5636 "cycles to the network device, attempting to recover. "
5637 "Please report the problem to the driver maintainer "
5638 "and include system chipset information.\n");
Michael Chandf3e6542006-05-26 17:48:07 -07005639
5640 spin_lock(&tp->lock);
Joe Perches63c3a662011-04-26 08:12:10 +00005641 tg3_flag_set(tp, TX_RECOVERY_PENDING);
Michael Chandf3e6542006-05-26 17:48:07 -07005642 spin_unlock(&tp->lock);
5643}
5644
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005645static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
Michael Chan1b2a7202006-08-07 21:46:02 -07005646{
Matt Carlsonf65aac12010-08-02 11:26:03 +00005647 /* Tell compiler to fetch tx indices from memory. */
5648 barrier();
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005649 return tnapi->tx_pending -
5650 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
Michael Chan1b2a7202006-08-07 21:46:02 -07005651}
5652
Linus Torvalds1da177e2005-04-16 15:20:36 -07005653/* Tigon3 never reports partial packet sends. So we do not
5654 * need special logic to handle SKBs that have not had all
5655 * of their frags sent yet, like SunGEM does.
5656 */
Matt Carlson17375d22009-08-28 14:02:18 +00005657static void tg3_tx(struct tg3_napi *tnapi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658{
Matt Carlson17375d22009-08-28 14:02:18 +00005659 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00005660 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005661 u32 sw_idx = tnapi->tx_cons;
Matt Carlsonfe5f5782009-09-01 13:09:39 +00005662 struct netdev_queue *txq;
5663 int index = tnapi - tp->napi;
Tom Herbert298376d2011-11-28 16:33:30 +00005664 unsigned int pkts_compl = 0, bytes_compl = 0;
Matt Carlsonfe5f5782009-09-01 13:09:39 +00005665
Joe Perches63c3a662011-04-26 08:12:10 +00005666 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonfe5f5782009-09-01 13:09:39 +00005667 index--;
5668
5669 txq = netdev_get_tx_queue(tp->dev, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005670
5671 while (sw_idx != hw_idx) {
Matt Carlsondf8944c2011-07-27 14:20:46 +00005672 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07005674 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675
Michael Chandf3e6542006-05-26 17:48:07 -07005676 if (unlikely(skb == NULL)) {
5677 tg3_tx_recover(tp);
5678 return;
5679 }
5680
Alexander Duyckf4188d82009-12-02 16:48:38 +00005681 pci_unmap_single(tp->pdev,
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00005682 dma_unmap_addr(ri, mapping),
Alexander Duyckf4188d82009-12-02 16:48:38 +00005683 skb_headlen(skb),
5684 PCI_DMA_TODEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685
5686 ri->skb = NULL;
5687
Matt Carlsone01ee142011-07-27 14:20:50 +00005688 while (ri->fragmented) {
5689 ri->fragmented = false;
5690 sw_idx = NEXT_TX(sw_idx);
5691 ri = &tnapi->tx_buffers[sw_idx];
5692 }
5693
Linus Torvalds1da177e2005-04-16 15:20:36 -07005694 sw_idx = NEXT_TX(sw_idx);
5695
5696 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005697 ri = &tnapi->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07005698 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5699 tx_bug = 1;
Alexander Duyckf4188d82009-12-02 16:48:38 +00005700
5701 pci_unmap_page(tp->pdev,
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00005702 dma_unmap_addr(ri, mapping),
Eric Dumazet9e903e02011-10-18 21:00:24 +00005703 skb_frag_size(&skb_shinfo(skb)->frags[i]),
Alexander Duyckf4188d82009-12-02 16:48:38 +00005704 PCI_DMA_TODEVICE);
Matt Carlsone01ee142011-07-27 14:20:50 +00005705
5706 while (ri->fragmented) {
5707 ri->fragmented = false;
5708 sw_idx = NEXT_TX(sw_idx);
5709 ri = &tnapi->tx_buffers[sw_idx];
5710 }
5711
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712 sw_idx = NEXT_TX(sw_idx);
5713 }
5714
Tom Herbert298376d2011-11-28 16:33:30 +00005715 pkts_compl++;
5716 bytes_compl += skb->len;
5717
David S. Millerf47c11e2005-06-24 20:18:35 -07005718 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07005719
5720 if (unlikely(tx_bug)) {
5721 tg3_tx_recover(tp);
5722 return;
5723 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005724 }
5725
Tom Herbert5cb917b2012-03-05 19:53:50 +00005726 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
Tom Herbert298376d2011-11-28 16:33:30 +00005727
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005728 tnapi->tx_cons = sw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729
Michael Chan1b2a7202006-08-07 21:46:02 -07005730 /* Need to make the tx_cons update visible to tg3_start_xmit()
5731 * before checking for netif_queue_stopped(). Without the
5732 * memory barrier, there is a small possibility that tg3_start_xmit()
5733 * will miss it and cause the queue to be stopped forever.
5734 */
5735 smp_mb();
5736
Matt Carlsonfe5f5782009-09-01 13:09:39 +00005737 if (unlikely(netif_tx_queue_stopped(txq) &&
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005738 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
Matt Carlsonfe5f5782009-09-01 13:09:39 +00005739 __netif_tx_lock(txq, smp_processor_id());
5740 if (netif_tx_queue_stopped(txq) &&
Matt Carlsonf3f3f272009-08-28 14:03:21 +00005741 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
Matt Carlsonfe5f5782009-09-01 13:09:39 +00005742 netif_tx_wake_queue(txq);
5743 __netif_tx_unlock(txq);
Michael Chan51b91462005-09-01 17:41:28 -07005744 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005745}
5746
Eric Dumazet8d4057a2012-04-27 00:34:49 +00005747static void tg3_frag_free(bool is_frag, void *data)
5748{
5749 if (is_frag)
5750 put_page(virt_to_head_page(data));
5751 else
5752 kfree(data);
5753}
5754
Eric Dumazet9205fd92011-11-18 06:47:01 +00005755static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005756{
Eric Dumazet8d4057a2012-04-27 00:34:49 +00005757 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5758 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5759
Eric Dumazet9205fd92011-11-18 06:47:01 +00005760 if (!ri->data)
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005761 return;
5762
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00005763 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005764 map_sz, PCI_DMA_FROMDEVICE);
Eric Dumazeta1e8b3072012-05-18 21:33:39 +00005765 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
Eric Dumazet9205fd92011-11-18 06:47:01 +00005766 ri->data = NULL;
Matt Carlson2b2cdb62009-11-13 13:03:48 +00005767}
5768
Eric Dumazet8d4057a2012-04-27 00:34:49 +00005769
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the ring (standard or jumbo) identified by the opaque
	 * cookie and locate the descriptor/ring_info slot to fill.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Total allocation = headroom + data + shared_info, each aligned;
	 * this same formula is used by tg3_rx_data_free() when freeing.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		/* Small enough for a page fragment; *frag_size != 0 tells
		 * build_skb()/tg3_frag_free() it is frag-backed.
		 */
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	/* Map only the packet area (past the headroom) for the device. */
	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	/* Publish the DMA address to the hardware descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5845
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Moves an already-mapped buffer from the shared producer ring
 * (napi[0]) slot @src_idx into @dpr at @dest_idx_unmasked, so the
 * buffer can be reposted to the chip without re-allocating.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always the shared ring owned by the first NAPI context. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 * (Pairs with the smp_rmb() in tg3_rx_prodring_xfer().)
	 */
	smp_wmb();

	src_map->data = NULL;
}
5895
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* NAPI RX worker: process up to @budget completed packets from this
 * vector's return ring and repost/refill buffers.  Returns the number
 * of packets handed to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies which producer ring (and
		 * slot) the buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except the harmless odd-nibble MII
		 * case), recycling the buffer back to the producer ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large packet: post a replacement buffer, then hand
			 * the original buffer to the stack zero-copy via
			 * build_skb().
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: copy into a fresh skb and recycle
			 * the original DMA buffer back to the chip.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust the hardware checksum only when the device offload
		 * is enabled and the chip reports a valid TCP/UDP csum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN header,
		 * which legitimately adds to the length.
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post standard buffers early if the chip is close to
		 * running out, so it never starves mid-burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* Under RSS, napi[1] owns the shared producer ring; ask it
		 * to transfer our recycled buffers back (see tg3_poll_work).
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6112
/* Check the status block for a link-change event and, if one is
 * pending, acknowledge it and kick off link renegotiation.
 * Skipped entirely when link state is tracked via the link-change
 * register or serdes polling instead of the status block.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * rest of the status word.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib handles the PHY; just ack the MAC
				 * status bits so the event is not re-raised.
				 */
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6136
/* Transfer recycled RX buffers from a per-vector source producer ring
 * set @spr back into the destination (shared) ring set @dpr, for both
 * the standard and jumbo rings.
 *
 * Copies are done in contiguous chunks bounded by ring wrap-around and
 * by the first destination slot still holding a buffer.  Returns 0, or
 * -ENOSPC if any chunk was truncated because the destination ring had
 * occupied slots (the caller then forces an interrupt to retry).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* --- Standard ring --- */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Chunk length: up to the producer, or to the end of the
		 * ring if the valid region wraps.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate the chunk at the first destination slot that is
		 * still occupied.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* --- Jumbo ring (same algorithm as above) --- */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6262
/* Core NAPI work function shared by the poll handlers: reap TX
 * completions, process RX within the remaining budget, and (on the
 * RSS vector napi[1]) gather recycled buffers from all RX queues back
 * into the shared producer ring.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* A TX error queued a reset; stop processing immediately. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an RX return ring have nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Clear the refill request before transferring, so a new
		 * request raised during the transfer is not lost.
		 */
		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Buffer writes must reach memory before the mailboxes
		 * advertise them to the chip.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A destination ring was full; force an interrupt so the
		 * transfer is retried soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
David S. Millerf7383c22005-05-18 22:50:53 -07006313
Matt Carlsondb219972011-11-04 09:15:03 +00006314static inline void tg3_reset_task_schedule(struct tg3 *tp)
6315{
6316 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6317 schedule_work(&tp->reset_task);
6318}
6319
/* Cancel any scheduled reset task and clear the associated flags.
 *
 * cancel_work_sync() also waits for a currently-running reset task to
 * finish, so on return no reset work is in flight and a subsequent
 * tg3_reset_task_schedule() starts from a clean state.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6326
/* NAPI poll handler for MSI-X vectors (tagged status blocks).
 * Loops doing work until either the budget is exhausted or no RX/TX
 * work remains, then completes NAPI and re-enables the vector's
 * interrupt via its mailbox.  Returns the amount of work done.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6386
Matt Carlsone64de4e2011-04-13 11:05:05 +00006387static void tg3_process_error(struct tg3 *tp)
6388{
6389 u32 val;
6390 bool real_error = false;
6391
Joe Perches63c3a662011-04-26 08:12:10 +00006392 if (tg3_flag(tp, ERROR_PROCESSED))
Matt Carlsone64de4e2011-04-13 11:05:05 +00006393 return;
6394
6395 /* Check Flow Attention register */
6396 val = tr32(HOSTCC_FLOW_ATTN);
6397 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6398 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6399 real_error = true;
6400 }
6401
6402 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6403 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6404 real_error = true;
6405 }
6406
6407 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6408 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6409 real_error = true;
6410 }
6411
6412 if (!real_error)
6413 return;
6414
6415 tg3_dump_state(tp);
6416
Joe Perches63c3a662011-04-26 08:12:10 +00006417 tg3_flag_set(tp, ERROR_PROCESSED);
Matt Carlsondb219972011-11-04 09:15:03 +00006418 tg3_reset_task_schedule(tp);
Matt Carlsone64de4e2011-04-13 11:05:05 +00006419}
6420
/* NAPI poll handler for vector 0 (and the only vector in INTx/MSI mode).
 * In addition to RX/TX work it owns error processing and link-event
 * polling.  Returns the amount of work done; returning < budget means
 * napi_complete() was called and interrupts were re-enabled.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6468
Matt Carlson66cfd1b2010-09-30 10:34:30 +00006469static void tg3_napi_disable(struct tg3 *tp)
6470{
6471 int i;
6472
6473 for (i = tp->irq_cnt - 1; i >= 0; i--)
6474 napi_disable(&tp->napi[i].napi);
6475}
6476
6477static void tg3_napi_enable(struct tg3 *tp)
6478{
6479 int i;
6480
6481 for (i = 0; i < tp->irq_cnt; i++)
6482 napi_enable(&tp->napi[i].napi);
6483}
6484
6485static void tg3_napi_init(struct tg3 *tp)
6486{
6487 int i;
6488
6489 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6490 for (i = 1; i < tp->irq_cnt; i++)
6491 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6492}
6493
6494static void tg3_napi_fini(struct tg3 *tp)
6495{
6496 int i;
6497
6498 for (i = 0; i < tp->irq_cnt; i++)
6499 netif_napi_del(&tp->napi[i].napi);
6500}
6501
/* Quiesce the data path: stop NAPI polling and disable all TX queues.
 * trans_start is refreshed first so the watchdog does not see a stale
 * timestamp and fire a spurious TX timeout while we are stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
6508
/* Restart the data path after tg3_netif_stop(): wake TX queues,
 * re-enable NAPI, then force one status-block pass so any events that
 * arrived while stopped are processed once interrupts come back on.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Pretend an update is pending so the first poll runs. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6521
/* Wait until no IRQ handler for any vector is running.  Setting
 * irq_sync makes the handlers bail out early (via tg3_irq_sync());
 * the smp_mb() orders that store before the synchronize_irq() waits.
 * Must not be called twice without an intervening un-sync (BUG_ON).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6534
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock(); BHs are disabled while held.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6546
/* Release the lock taken by tg3_full_lock(), re-enabling BHs. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6551
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 *
 * Just prefetch the status block / next RX descriptor and hand off
 * to NAPI, unless an irq-quiesce is in progress (tg3_irq_sync()).
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	/* TX-only vectors have no RX return ring. */
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6569
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	/* TX-only vectors have no RX return ring. */
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6595
/* Legacy INTx interrupt handler (non-tagged status mode).  May share
 * the line with other devices, so it must determine whether the IRQ
 * is ours before claiming it.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6644
/* Legacy INTx interrupt handler for tagged-status mode.  The status
 * tag lets us detect screaming shared interrupts: if the tag has not
 * advanced since the last IRQ we had already seen, it is not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6696
Michael Chan79381092005-04-21 17:13:59 -07006697/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01006698static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07006699{
Matt Carlson09943a12009-08-28 14:01:57 +00006700 struct tg3_napi *tnapi = dev_id;
6701 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00006702 struct tg3_hw_status *sblk = tnapi->hw_status;
Michael Chan79381092005-04-21 17:13:59 -07006703
Michael Chanf9804dd2005-09-27 12:13:10 -07006704 if ((sblk->status & SD_STATUS_UPDATED) ||
6705 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07006706 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07006707 return IRQ_RETVAL(1);
6708 }
6709 return IRQ_RETVAL(0);
6710}
6711
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with real interrupts unavailable, invoke the INTx
 * handler for every vector to drain pending events.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6722
/* Network-stack TX watchdog callback: log (and dump chip state when
 * tx_err messages are enabled), then schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6734
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* base + len + 8 wrapping below base means the buffer spans a
	 * 4GB boundary; the base > 0xffffdcc0 pre-check limits the test
	 * to mappings near the top of a 4GB window.  NOTE(review): the
	 * derivation of 0xffffdcc0 and the +8 slack is not visible here;
	 * presumably per Broadcom errata — confirm before changing.
	 */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
6742
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only affected chips (40BIT_DMA_BUG) and only builds where a
	 * dma address can actually exceed 40 bits need the check.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
6755
Matt Carlsond1a3b732011-07-27 14:20:51 +00006756static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
Matt Carlson92cd3a12011-07-27 14:20:47 +00006757 dma_addr_t mapping, u32 len, u32 flags,
6758 u32 mss, u32 vlan)
Matt Carlson2ffcc982011-05-19 12:12:44 +00006759{
Matt Carlson92cd3a12011-07-27 14:20:47 +00006760 txbd->addr_hi = ((u64) mapping >> 32);
6761 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6762 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6763 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
Matt Carlson2ffcc982011-05-19 12:12:44 +00006764}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765
/* Place one DMA segment into the TX ring at *entry, splitting it into
 * dma_limit-sized descriptors when the chip has a DMA-length limit.
 * Advances *entry and decrements *budget per descriptor consumed.
 * Returns true if a hardware-bug condition was hit (caller must then
 * fall back to tigon3_dma_hwbug_workaround()).
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Chips with the short-DMA bug cannot handle <= 8 byte segments. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the final descriptor of the segment carries END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: un-mark the previous
				 * entry so unmap stops at the right place.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
6825
/* Undo the DMA mappings of a transmitted (or failed) skb starting at
 * ring position 'entry': the linear head first, then fragments 0..last.
 * Entries marked 'fragmented' are the extra descriptors created by
 * tg3_tx_frag_set()'s dma_limit splitting; they share the mapping and
 * are skipped (only their flag is cleared).  Clears txb->skb but does
 * not free the skb — that is the caller's job.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6863
/* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize the skb into a freshly-allocated copy whose mapping avoids
 * the problematic address ranges, then re-run tg3_tx_frag_set() on it.
 * On success *pskb points at the new skb and 0 is returned; on any
 * failure -1 is returned.  The original skb is always freed.  Note
 * *pskb is set to new_skb even when new_skb is NULL — callers must
 * check the return value before touching *pskb.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 additionally needs 4-byte alignment of the data. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Ring placement failed: unmap what we just
				 * mapped (no fragments, hence last == -1).
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6918
Matt Carlson2ffcc982011-05-19 12:12:44 +00006919static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
Michael Chan52c0fd82006-06-29 20:15:54 -07006920
6921/* Use GSO to workaround a rare TSO bug that may be triggered when the
6922 * TSO header is greater than 80 bytes.
6923 */
6924static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6925{
6926 struct sk_buff *segs, *nskb;
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006927 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
Michael Chan52c0fd82006-06-29 20:15:54 -07006928
6929 /* Estimate the number of fragments in the worst case */
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006930 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
Michael Chan52c0fd82006-06-29 20:15:54 -07006931 netif_stop_queue(tp->dev);
Matt Carlsonf65aac12010-08-02 11:26:03 +00006932
6933 /* netif_tx_stop_queue() must be done before checking
6934 * checking tx index in tg3_tx_avail() below, because in
6935 * tg3_tx(), we update tx index before checking for
6936 * netif_tx_queue_stopped().
6937 */
6938 smp_mb();
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006939 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
Michael Chan7f62ad52007-02-20 23:25:40 -08006940 return NETDEV_TX_BUSY;
6941
6942 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07006943 }
6944
6945 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07006946 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07006947 goto tg3_tso_bug_end;
6948
6949 do {
6950 nskb = segs;
6951 segs = segs->next;
6952 nskb->next = NULL;
Matt Carlson2ffcc982011-05-19 12:12:44 +00006953 tg3_start_xmit(nskb, tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07006954 } while (segs);
6955
6956tg3_tso_bug_end:
6957 dev_kfree_skb(skb);
6958
6959 return NETDEV_TX_OK;
6960}
Michael Chan52c0fd82006-06-29 20:15:54 -07006961
Michael Chan5a6f3072006-03-20 22:28:05 -08006962/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
Joe Perches63c3a662011-04-26 08:12:10 +00006963 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
Michael Chan5a6f3072006-03-20 22:28:05 -08006964 */
Matt Carlson2ffcc982011-05-19 12:12:44 +00006965static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
Michael Chan5a6f3072006-03-20 22:28:05 -08006966{
6967 struct tg3 *tp = netdev_priv(dev);
Matt Carlson92cd3a12011-07-27 14:20:47 +00006968 u32 len, entry, base_flags, mss, vlan = 0;
Matt Carlson84b67b22011-07-27 14:20:52 +00006969 u32 budget;
Matt Carlson432aa7e2011-05-19 12:12:45 +00006970 int i = -1, would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07006971 dma_addr_t mapping;
Matt Carlson24f4efd2009-11-13 13:03:35 +00006972 struct tg3_napi *tnapi;
6973 struct netdev_queue *txq;
Matt Carlson432aa7e2011-05-19 12:12:45 +00006974 unsigned int last;
Alexander Duyckf4188d82009-12-02 16:48:38 +00006975
Matt Carlson24f4efd2009-11-13 13:03:35 +00006976 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6977 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
Joe Perches63c3a662011-04-26 08:12:10 +00006978 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlson24f4efd2009-11-13 13:03:35 +00006979 tnapi++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006980
Matt Carlson84b67b22011-07-27 14:20:52 +00006981 budget = tg3_tx_avail(tnapi);
6982
Michael Chan00b70502006-06-17 21:58:45 -07006983 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006984 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07006985 * interrupt. Furthermore, IRQ processing runs lockless so we have
6986 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07006987 */
Matt Carlson84b67b22011-07-27 14:20:52 +00006988 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
Matt Carlson24f4efd2009-11-13 13:03:35 +00006989 if (!netif_tx_queue_stopped(txq)) {
6990 netif_tx_stop_queue(txq);
Stephen Hemminger1f064a82005-12-06 17:36:44 -08006991
6992 /* This is a hard error, log it. */
Matt Carlson5129c3a2010-04-05 10:19:23 +00006993 netdev_err(dev,
6994 "BUG! Tx Ring full when queue awake!\n");
Stephen Hemminger1f064a82005-12-06 17:36:44 -08006995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006996 return NETDEV_TX_BUSY;
6997 }
6998
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006999 entry = tnapi->tx_prod;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007000 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07007001 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Matt Carlson24f4efd2009-11-13 13:03:35 +00007003
Matt Carlsonbe98da62010-07-11 09:31:46 +00007004 mss = skb_shinfo(skb)->gso_size;
7005 if (mss) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07007006 struct iphdr *iph;
Matt Carlson34195c32010-07-11 09:31:42 +00007007 u32 tcp_opt_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007008
7009 if (skb_header_cloned(skb) &&
Eric Dumazet48855432011-10-24 07:53:03 +00007010 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7011 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007012
Matt Carlson34195c32010-07-11 09:31:42 +00007013 iph = ip_hdr(skb);
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07007014 tcp_opt_len = tcp_optlen(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015
Eric Dumazeta5a11952012-01-23 01:22:09 +00007016 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
Matt Carlson34195c32010-07-11 09:31:42 +00007017
Eric Dumazeta5a11952012-01-23 01:22:09 +00007018 if (!skb_is_gso_v6(skb)) {
Matt Carlson34195c32010-07-11 09:31:42 +00007019 iph->check = 0;
7020 iph->tot_len = htons(mss + hdr_len);
7021 }
7022
Michael Chan52c0fd82006-06-29 20:15:54 -07007023 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Joe Perches63c3a662011-04-26 08:12:10 +00007024 tg3_flag(tp, TSO_BUG))
Matt Carlsonde6f31e2010-04-12 06:58:30 +00007025 return tg3_tso_bug(tp, skb);
Michael Chan52c0fd82006-06-29 20:15:54 -07007026
Linus Torvalds1da177e2005-04-16 15:20:36 -07007027 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7028 TXD_FLAG_CPU_POST_DMA);
7029
Joe Perches63c3a662011-04-26 08:12:10 +00007030 if (tg3_flag(tp, HW_TSO_1) ||
7031 tg3_flag(tp, HW_TSO_2) ||
7032 tg3_flag(tp, HW_TSO_3)) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07007033 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007034 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07007035 } else
7036 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7037 iph->daddr, 0,
7038 IPPROTO_TCP,
7039 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007040
Joe Perches63c3a662011-04-26 08:12:10 +00007041 if (tg3_flag(tp, HW_TSO_3)) {
Matt Carlson615774f2009-11-13 13:03:39 +00007042 mss |= (hdr_len & 0xc) << 12;
7043 if (hdr_len & 0x10)
7044 base_flags |= 0x00000010;
7045 base_flags |= (hdr_len & 0x3e0) << 5;
Joe Perches63c3a662011-04-26 08:12:10 +00007046 } else if (tg3_flag(tp, HW_TSO_2))
Matt Carlson92c6b8d2009-11-02 14:23:27 +00007047 mss |= hdr_len << 9;
Joe Perches63c3a662011-04-26 08:12:10 +00007048 else if (tg3_flag(tp, HW_TSO_1) ||
Matt Carlson92c6b8d2009-11-02 14:23:27 +00007049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07007050 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007051 int tsflags;
7052
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07007053 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007054 mss |= (tsflags << 11);
7055 }
7056 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07007057 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007058 int tsflags;
7059
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07007060 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007061 base_flags |= tsflags << 12;
7062 }
7063 }
7064 }
Matt Carlsonbf933c82011-01-25 15:58:49 +00007065
Matt Carlson93a700a2011-08-31 11:44:54 +00007066 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7067 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7068 base_flags |= TXD_FLAG_JMB_PKT;
7069
Matt Carlson92cd3a12011-07-27 14:20:47 +00007070 if (vlan_tx_tag_present(skb)) {
7071 base_flags |= TXD_FLAG_VLAN;
7072 vlan = vlan_tx_tag_get(skb);
7073 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007074
Alexander Duyckf4188d82009-12-02 16:48:38 +00007075 len = skb_headlen(skb);
7076
7077 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
Eric Dumazet48855432011-10-24 07:53:03 +00007078 if (pci_dma_mapping_error(tp->pdev, mapping))
7079 goto drop;
7080
David S. Miller90079ce2008-09-11 04:52:51 -07007081
Matt Carlsonf3f3f272009-08-28 14:03:21 +00007082 tnapi->tx_buffers[entry].skb = skb;
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00007083 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007084
7085 would_hit_hwbug = 0;
7086
Joe Perches63c3a662011-04-26 08:12:10 +00007087 if (tg3_flag(tp, 5701_DMA_BUG))
Michael Chanc58ec932005-09-17 00:46:27 -07007088 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007089
Matt Carlson84b67b22011-07-27 14:20:52 +00007090 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
Matt Carlsond1a3b732011-07-27 14:20:51 +00007091 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
Matt Carlsonba1142e2011-11-04 09:15:00 +00007092 mss, vlan)) {
Matt Carlsond1a3b732011-07-27 14:20:51 +00007093 would_hit_hwbug = 1;
Matt Carlsonba1142e2011-11-04 09:15:00 +00007094 } else if (skb_shinfo(skb)->nr_frags > 0) {
Matt Carlson92cd3a12011-07-27 14:20:47 +00007095 u32 tmp_mss = mss;
7096
7097 if (!tg3_flag(tp, HW_TSO_1) &&
7098 !tg3_flag(tp, HW_TSO_2) &&
7099 !tg3_flag(tp, HW_TSO_3))
7100 tmp_mss = 0;
7101
Matt Carlsonc5665a52012-02-13 10:20:12 +00007102 /* Now loop through additional data
7103 * fragments, and queue them.
7104 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007105 last = skb_shinfo(skb)->nr_frags - 1;
7106 for (i = 0; i <= last; i++) {
7107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7108
Eric Dumazet9e903e02011-10-18 21:00:24 +00007109 len = skb_frag_size(frag);
Ian Campbelldc234d02011-08-24 22:28:11 +00007110 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
Ian Campbell5d6bcdf2011-10-06 11:10:48 +01007111 len, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007112
Matt Carlsonf3f3f272009-08-28 14:03:21 +00007113 tnapi->tx_buffers[entry].skb = NULL;
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00007114 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
Alexander Duyckf4188d82009-12-02 16:48:38 +00007115 mapping);
Ian Campbell5d6bcdf2011-10-06 11:10:48 +01007116 if (dma_mapping_error(&tp->pdev->dev, mapping))
Alexander Duyckf4188d82009-12-02 16:48:38 +00007117 goto dma_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007118
Matt Carlsonb9e45482011-11-04 09:14:59 +00007119 if (!budget ||
7120 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
Matt Carlson84b67b22011-07-27 14:20:52 +00007121 len, base_flags |
7122 ((i == last) ? TXD_FLAG_END : 0),
Matt Carlsonb9e45482011-11-04 09:14:59 +00007123 tmp_mss, vlan)) {
Matt Carlson92c6b8d2009-11-02 14:23:27 +00007124 would_hit_hwbug = 1;
Matt Carlsonb9e45482011-11-04 09:14:59 +00007125 break;
7126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007127 }
7128 }
7129
7130 if (would_hit_hwbug) {
Matt Carlson0d681b22011-07-27 14:20:49 +00007131 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007132
7133 /* If the workaround fails due to memory/mapping
7134 * failure, silently drop this packet.
7135 */
Matt Carlson84b67b22011-07-27 14:20:52 +00007136 entry = tnapi->tx_prod;
7137 budget = tg3_tx_avail(tnapi);
David S. Miller1805b2f2011-10-24 18:18:09 -04007138 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
Matt Carlson84b67b22011-07-27 14:20:52 +00007139 base_flags, mss, vlan))
Eric Dumazet48855432011-10-24 07:53:03 +00007140 goto drop_nofree;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007141 }
7142
Richard Cochrand515b452011-06-19 03:31:41 +00007143 skb_tx_timestamp(skb);
Tom Herbert5cb917b2012-03-05 19:53:50 +00007144 netdev_tx_sent_queue(txq, skb->len);
Richard Cochrand515b452011-06-19 03:31:41 +00007145
Michael Chan6541b802012-03-04 14:48:14 +00007146 /* Sync BD data before updating mailbox */
7147 wmb();
7148
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149 /* Packets are ready, update Tx producer idx local and on card. */
Matt Carlson24f4efd2009-11-13 13:03:35 +00007150 tw32_tx_mbox(tnapi->prodmbox, entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007151
Matt Carlsonf3f3f272009-08-28 14:03:21 +00007152 tnapi->tx_prod = entry;
7153 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
Matt Carlson24f4efd2009-11-13 13:03:35 +00007154 netif_tx_stop_queue(txq);
Matt Carlsonf65aac12010-08-02 11:26:03 +00007155
7156 /* netif_tx_stop_queue() must be done before checking
7157 * checking tx index in tg3_tx_avail() below, because in
7158 * tg3_tx(), we update tx index before checking for
7159 * netif_tx_queue_stopped().
7160 */
7161 smp_mb();
Matt Carlsonf3f3f272009-08-28 14:03:21 +00007162 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
Matt Carlson24f4efd2009-11-13 13:03:35 +00007163 netif_tx_wake_queue(txq);
Michael Chan51b91462005-09-01 17:41:28 -07007164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007165
Eric Dumazetcdd0db02009-05-28 00:00:41 +00007166 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167 return NETDEV_TX_OK;
Alexander Duyckf4188d82009-12-02 16:48:38 +00007168
7169dma_error:
Matt Carlsonba1142e2011-11-04 09:15:00 +00007170 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
Matt Carlson432aa7e2011-05-19 12:12:45 +00007171 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
Eric Dumazet48855432011-10-24 07:53:03 +00007172drop:
7173 dev_kfree_skb(skb);
7174drop_nofree:
7175 tp->tx_dropped++;
Alexander Duyckf4188d82009-12-02 16:48:38 +00007176 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007177}
7178
/* Enable or disable internal MAC loopback mode.
 *
 * @tp:     device private state
 * @enable: true to route TX back to RX inside the MAC, false to restore
 *          normal port operation
 *
 * Only updates tp->mac_mode and writes it to the MAC_MODE register;
 * PHY-level loopback is handled separately (see tg3_phy_lpbk_set).
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		/* Clear duplex/port-mode bits before selecting loopback. */
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		/* Pre-5705 chips need the link-polarity bit set here. */
		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		/* Pick MII vs GMII based on the PHY's speed capability. */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* Latch the new mode and give the MAC time to settle. */
	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
7206
/* Configure PHY loopback at a given speed for self-test.
 *
 * @tp:      device private state
 * @speed:   SPEED_10 / SPEED_100 / SPEED_1000 (FET PHYs are capped at 100)
 * @extlpbk: true for external loopback (cable plug), false for internal
 *           PHY loopback via BMCR_LOOPBACK
 *
 * Returns 0 on success or -EIO if external loopback setup fails.
 * The statement order below follows the hardware bring-up sequence and
 * must not be rearranged.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* Auto power-down and auto-MDIX interfere with loopback tests. */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET (10/100-only) PHYs cannot do gigabit; fall back. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role so the link comes up without
			 * a partner negotiating the other role.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 FET PHYs need TX link/lock forced in test mode;
		 * preserve any trim bits already placed in ptest above.
		 */
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Derive the MAC port mode from the (possibly adjusted) speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs need opposite link-polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7299
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007300static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007301{
7302 struct tg3 *tp = netdev_priv(dev);
7303
7304 if (features & NETIF_F_LOOPBACK) {
7305 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7306 return;
7307
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007308 spin_lock_bh(&tp->lock);
Matt Carlson6e01b202011-08-19 13:58:20 +00007309 tg3_mac_loopback(tp, true);
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007310 netif_carrier_on(tp->dev);
7311 spin_unlock_bh(&tp->lock);
7312 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7313 } else {
7314 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7315 return;
7316
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007317 spin_lock_bh(&tp->lock);
Matt Carlson6e01b202011-08-19 13:58:20 +00007318 tg3_mac_loopback(tp, false);
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007319 /* Force link status check */
7320 tg3_setup_phy(tp, 1);
7321 spin_unlock_bh(&tp->lock);
7322 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7323 }
7324}
7325
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007326static netdev_features_t tg3_fix_features(struct net_device *dev,
7327 netdev_features_t features)
Michał Mirosławdc668912011-04-07 03:35:07 +00007328{
7329 struct tg3 *tp = netdev_priv(dev);
7330
Joe Perches63c3a662011-04-26 08:12:10 +00007331 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
Michał Mirosławdc668912011-04-07 03:35:07 +00007332 features &= ~NETIF_F_ALL_TSO;
7333
7334 return features;
7335}
7336
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007337static int tg3_set_features(struct net_device *dev, netdev_features_t features)
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007338{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007339 netdev_features_t changed = dev->features ^ features;
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007340
7341 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7342 tg3_set_loopback(dev, features);
7343
7344 return 0;
7345}
7346
/* Release all rx buffers attached to a producer ring set.
 *
 * For per-vector rings (not the hw prodring on napi[0]) only the
 * consumer..producer window holds live buffers, so just that span is
 * walked; the true hw ring is freed in its entirety.  Ring indices
 * wrap via the power-of-two ring masks.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Free only the in-flight window of the std ring. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Hardware producer ring: free every slot. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	/* 5780-class parts have no separate jumbo ring to empty here. */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
7380
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one buffer could be
 * allocated for a ring.  On partial success the pending counts are
 * shrunk to what was actually allocated and 0 is returned.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector (non-hw) ring sets only need their buffer
	 * bookkeeping arrays cleared; descriptors live on napi[0].
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips carry jumbo frames on the std ring, so map
	 * larger buffers when the MTU requires it.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with the buffers we did get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Undo any buffers posted before the failure. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7489
/* Tear down a producer ring set's memory.
 *
 * Frees the buffer bookkeeping arrays and, when present, the DMA
 * coherent descriptor rings.  Safe to call on a partially-initialized
 * set (kfree(NULL) is a no-op and the rx_std/rx_jmb pointers are
 * checked); pointers are NULLed so a repeat call is harmless.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
7508
/* Allocate the memory backing one producer ring set.
 *
 * Allocates the zeroed buffer bookkeeping arrays plus the DMA coherent
 * std (and, for jumbo-capable non-5780-class chips, jumbo) descriptor
 * rings.  Returns 0 on success or -ENOMEM after unwinding any partial
 * allocation via tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	/* 5780-class chips carry jumbo frames on the std ring, so they
	 * need no separate jumbo ring allocation.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
7544
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have no tx buffers to free. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the head slot plus every fragment slot. */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		/* Keep BQL accounting consistent with the emptied ring. */
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
7578
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM (after freeing everything) if a
 * producer ring could not be repopulated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Reset per-vector interrupt and ring state. */
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
7619
Michael Chan49a359e2012-09-28 07:12:37 +00007620static void tg3_mem_tx_release(struct tg3 *tp)
7621{
7622 int i;
7623
7624 for (i = 0; i < tp->irq_max; i++) {
7625 struct tg3_napi *tnapi = &tp->napi[i];
7626
7627 if (tnapi->tx_ring) {
7628 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7629 tnapi->tx_ring, tnapi->tx_desc_mapping);
7630 tnapi->tx_ring = NULL;
7631 }
7632
7633 kfree(tnapi->tx_buffers);
7634 tnapi->tx_buffers = NULL;
7635 }
7636}
7637
7638static int tg3_mem_tx_acquire(struct tg3 *tp)
7639{
7640 int i;
7641 struct tg3_napi *tnapi = &tp->napi[0];
7642
7643 /* If multivector TSS is enabled, vector 0 does not handle
7644 * tx interrupts. Don't allocate any resources for it.
7645 */
7646 if (tg3_flag(tp, ENABLE_TSS))
7647 tnapi++;
7648
7649 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7650 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7651 TG3_TX_RING_SIZE, GFP_KERNEL);
7652 if (!tnapi->tx_buffers)
7653 goto err_out;
7654
7655 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7656 TG3_TX_RING_BYTES,
7657 &tnapi->tx_desc_mapping,
7658 GFP_KERNEL);
7659 if (!tnapi->tx_ring)
7660 goto err_out;
7661 }
7662
7663 return 0;
7664
7665err_out:
7666 tg3_mem_tx_release(tp);
7667 return -ENOMEM;
7668}
7669
7670static void tg3_mem_rx_release(struct tg3 *tp)
7671{
7672 int i;
7673
7674 for (i = 0; i < tp->irq_max; i++) {
7675 struct tg3_napi *tnapi = &tp->napi[i];
7676
7677 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7678
7679 if (!tnapi->rx_rcb)
7680 continue;
7681
7682 dma_free_coherent(&tp->pdev->dev,
7683 TG3_RX_RCB_RING_BYTES(tp),
7684 tnapi->rx_rcb,
7685 tnapi->rx_rcb_mapping);
7686 tnapi->rx_rcb = NULL;
7687 }
7688}
7689
/* Allocate rx producer rings and rx return (RCB) rings.
 *
 * Returns 0 on success or -ENOMEM after releasing any partially
 * acquired rx resources via tg3_mem_rx_release().
 */
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
7731
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Frees per-vector status blocks, then all rx/tx ring memory, and
 * finally the shared hardware statistics block.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
7760
7761/*
7762 * Must not be invoked with interrupt sources disabled and
7763 * the hardware shutdown down. Can sleep.
7764 */
7765static int tg3_alloc_consistent(struct tg3 *tp)
7766{
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007767 int i;
Matt Carlson898a56f2009-08-28 14:02:40 +00007768
Matt Carlson4bae65c2010-11-24 08:31:52 +00007769 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7770 sizeof(struct tg3_hw_stats),
7771 &tp->stats_mapping,
7772 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773 if (!tp->hw_stats)
7774 goto err_out;
7775
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7777
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007778 for (i = 0; i < tp->irq_cnt; i++) {
7779 struct tg3_napi *tnapi = &tp->napi[i];
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00007780 struct tg3_hw_status *sblk;
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007781
Matt Carlson4bae65c2010-11-24 08:31:52 +00007782 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7783 TG3_HW_STATUS_SIZE,
7784 &tnapi->status_mapping,
7785 GFP_KERNEL);
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007786 if (!tnapi->hw_status)
7787 goto err_out;
7788
7789 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00007790 sblk = tnapi->hw_status;
7791
Michael Chan49a359e2012-09-28 07:12:37 +00007792 if (tg3_flag(tp, ENABLE_RSS)) {
Michael Chan86449942012-10-02 20:31:14 -07007793 u16 *prodptr = NULL;
Matt Carlson8fea32b2010-09-15 08:59:58 +00007794
Michael Chan49a359e2012-09-28 07:12:37 +00007795 /*
7796 * When RSS is enabled, the status block format changes
7797 * slightly. The "rx_jumbo_consumer", "reserved",
7798 * and "rx_mini_consumer" members get mapped to the
7799 * other three rx return ring producer indexes.
7800 */
7801 switch (i) {
7802 case 1:
7803 prodptr = &sblk->idx[0].rx_producer;
7804 break;
7805 case 2:
7806 prodptr = &sblk->rx_jumbo_consumer;
7807 break;
7808 case 3:
7809 prodptr = &sblk->reserved;
7810 break;
7811 case 4:
7812 prodptr = &sblk->rx_mini_consumer;
Matt Carlsonf891ea12012-04-24 13:37:01 +00007813 break;
7814 }
Michael Chan49a359e2012-09-28 07:12:37 +00007815 tnapi->rx_rcb_prod_idx = prodptr;
7816 } else {
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00007817 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00007818 }
Matt Carlsonf77a6a82009-09-01 13:04:37 +00007819 }
7820
Michael Chan49a359e2012-09-28 07:12:37 +00007821 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7822 goto err_out;
7823
Linus Torvalds1da177e2005-04-16 15:20:36 -07007824 return 0;
7825
7826err_out:
7827 tg3_free_consistent(tp);
7828 return -ENOMEM;
7829}
7830
7831#define MAX_WAIT_CNT 1000
7832
7833/* To stop a block, clear the enable bit and poll till it
7834 * clears. tp->lock is held.
7835 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07007836static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007837{
7838 unsigned int i;
7839 u32 val;
7840
Joe Perches63c3a662011-04-26 08:12:10 +00007841 if (tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007842 switch (ofs) {
7843 case RCVLSC_MODE:
7844 case DMAC_MODE:
7845 case MBFREE_MODE:
7846 case BUFMGR_MODE:
7847 case MEMARB_MODE:
7848 /* We can't enable/disable these bits of the
7849 * 5705/5750, just say success.
7850 */
7851 return 0;
7852
7853 default:
7854 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007855 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856 }
7857
7858 val = tr32(ofs);
7859 val &= ~enable_bit;
7860 tw32_f(ofs, val);
7861
7862 for (i = 0; i < MAX_WAIT_CNT; i++) {
7863 udelay(100);
7864 val = tr32(ofs);
7865 if ((val & enable_bit) == 0)
7866 break;
7867 }
7868
David S. Millerb3b7d6b2005-05-05 14:40:20 -07007869 if (i == MAX_WAIT_CNT && !silent) {
Matt Carlson2445e462010-04-05 10:19:21 +00007870 dev_err(&tp->pdev->dev,
7871 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7872 ofs, enable_bit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007873 return -ENODEV;
7874 }
7875
7876 return 0;
7877}
7878
/* tp->lock is held.
 *
 * Quiesce the NIC: disable interrupts, then shut down the rx path,
 * the tx path, the host coalescing / DMA engines, and finally the
 * buffer manager and memory arbiter -- in that order.  Errors from
 * the individual tg3_stop_block() calls are OR-ed together so one
 * failure does not abort the rest of the shutdown; the combined
 * (possibly negative) value is returned.  With @silent set, timeouts
 * are not logged.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx traffic before tearing down the
	 * receive-side blocks below.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The MAC tx engine has no tg3_stop_block() helper; poll its
	 * enable bit directly with the same MAX_WAIT_CNT budget.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register to flush the internal queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe every vector's status block so stale indices are not
	 * seen when the hardware is brought back up.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
7942
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset in tg3_chip_reset() can clear bits of
	 * PCI_COMMAND (e.g. memory enable); cache it so
	 * tg3_restore_pci_state() can write it back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7948
/* Restore PCI state after chip reset.
 *
 * Re-applies the configuration-space state that the GRC core-clock
 * reset destroys: indirect-access enables, PCI state register, the
 * saved PCI_COMMAND word, cacheline/latency timers (conventional PCI
 * only), PCI-X relaxed ordering, and -- on 5780-class parts -- the
 * MSI enable bit.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the PCI_COMMAND word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Cacheline size / latency timer only apply to conventional
	 * PCI (and PCI-X) devices.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8009
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset of the chip and rebuild enough state
 * afterwards (PCI config space, memory arbiter, GRC/MAC modes, ASF
 * flags) for the rest of hardware init to proceed.  Returns 0, or a
 * negative errno if the firmware fails to come back (tg3_poll_fw()).
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait for any in-flight interrupt handlers to finish before
	 * the reset makes PCI accesses unsafe.
	 */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving its mode bits on
	 * 5780-class chips.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Select the MAC port mode matching the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8246
Matt Carlson65ec6982012-02-28 23:33:37 +00008247static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8248static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
Matt Carlson92feeab2011-12-08 14:40:14 +00008249
Linus Torvalds1da177e2005-04-16 15:20:36 -07008250/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07008251static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008252{
8253 int err;
8254
8255 tg3_stop_fw(tp);
8256
Michael Chan944d9802005-05-29 14:57:48 -07008257 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008258
David S. Millerb3b7d6b2005-05-05 14:40:20 -07008259 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008260 err = tg3_chip_reset(tp);
8261
Matt Carlsondaba2a62009-04-20 06:58:52 +00008262 __tg3_set_mac_addr(tp, 0);
8263
Michael Chan944d9802005-05-29 14:57:48 -07008264 tg3_write_sig_legacy(tp, kind);
8265 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008266
Matt Carlson92feeab2011-12-08 14:40:14 +00008267 if (tp->hw_stats) {
8268 /* Save the stats across chip resets... */
David S. Millerb4017c52012-03-01 17:57:40 -05008269 tg3_get_nstats(tp, &tp->net_stats_prev);
Matt Carlson92feeab2011-12-08 14:40:14 +00008270 tg3_get_estats(tp, &tp->estats_prev);
8271
8272 /* And make sure the next sample is new data */
8273 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8274 }
8275
Linus Torvalds1da177e2005-04-16 15:20:36 -07008276 if (err)
8277 return err;
8278
8279 return 0;
8280}
8281
Linus Torvalds1da177e2005-04-16 15:20:36 -07008282static int tg3_set_mac_addr(struct net_device *dev, void *p)
8283{
8284 struct tg3 *tp = netdev_priv(dev);
8285 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07008286 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008287
Michael Chanf9804dd2005-09-27 12:13:10 -07008288 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka504f9b52012-02-21 02:07:49 +00008289 return -EADDRNOTAVAIL;
Michael Chanf9804dd2005-09-27 12:13:10 -07008290
Linus Torvalds1da177e2005-04-16 15:20:36 -07008291 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8292
Michael Chane75f7c92006-03-20 21:33:26 -08008293 if (!netif_running(dev))
8294 return 0;
8295
Joe Perches63c3a662011-04-26 08:12:10 +00008296 if (tg3_flag(tp, ENABLE_ASF)) {
Michael Chan986e0ae2007-05-05 12:10:20 -07008297 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07008298
Michael Chan986e0ae2007-05-05 12:10:20 -07008299 addr0_high = tr32(MAC_ADDR_0_HIGH);
8300 addr0_low = tr32(MAC_ADDR_0_LOW);
8301 addr1_high = tr32(MAC_ADDR_1_HIGH);
8302 addr1_low = tr32(MAC_ADDR_1_LOW);
8303
8304 /* Skip MAC addr 1 if ASF is using it. */
8305 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8306 !(addr1_high == 0 && addr1_low == 0))
8307 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07008308 }
Michael Chan986e0ae2007-05-05 12:10:20 -07008309 spin_lock_bh(&tp->lock);
8310 __tg3_set_mac_addr(tp, skip_mac_1);
8311 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008312
Michael Chanb9ec6c12006-07-25 16:37:27 -07008313 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008314}
8315
8316/* tp->lock is held. */
8317static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8318 dma_addr_t mapping, u32 maxlen_flags,
8319 u32 nic_addr)
8320{
8321 tg3_write_mem(tp,
8322 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8323 ((u64) mapping >> 32));
8324 tg3_write_mem(tp,
8325 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8326 ((u64) mapping & 0xffffffff));
8327 tg3_write_mem(tp,
8328 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8329 maxlen_flags);
8330
Joe Perches63c3a662011-04-26 08:12:10 +00008331 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008332 tg3_write_mem(tp,
8333 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8334 nic_addr);
8335}
8336
Michael Chana489b6d2012-09-28 07:12:39 +00008337
8338static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07008339{
Michael Chana489b6d2012-09-28 07:12:39 +00008340 int i = 0;
Matt Carlsonb6080e12009-09-01 13:12:00 +00008341
Joe Perches63c3a662011-04-26 08:12:10 +00008342 if (!tg3_flag(tp, ENABLE_TSS)) {
Matt Carlsonb6080e12009-09-01 13:12:00 +00008343 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8344 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8345 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
Matt Carlsonb6080e12009-09-01 13:12:00 +00008346 } else {
8347 tw32(HOSTCC_TXCOL_TICKS, 0);
8348 tw32(HOSTCC_TXMAX_FRAMES, 0);
8349 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
Michael Chana489b6d2012-09-28 07:12:39 +00008350
8351 for (; i < tp->txq_cnt; i++) {
8352 u32 reg;
8353
8354 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8355 tw32(reg, ec->tx_coalesce_usecs);
8356 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8357 tw32(reg, ec->tx_max_coalesced_frames);
8358 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8359 tw32(reg, ec->tx_max_coalesced_frames_irq);
8360 }
Matt Carlson19cfaec2009-12-03 08:36:20 +00008361 }
Matt Carlsonb6080e12009-09-01 13:12:00 +00008362
Michael Chana489b6d2012-09-28 07:12:39 +00008363 for (; i < tp->irq_max - 1; i++) {
8364 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8365 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8366 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8367 }
8368}
8369
8370static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8371{
8372 int i = 0;
8373 u32 limit = tp->rxq_cnt;
8374
Joe Perches63c3a662011-04-26 08:12:10 +00008375 if (!tg3_flag(tp, ENABLE_RSS)) {
Matt Carlson19cfaec2009-12-03 08:36:20 +00008376 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8377 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8378 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
Michael Chana489b6d2012-09-28 07:12:39 +00008379 limit--;
Matt Carlson19cfaec2009-12-03 08:36:20 +00008380 } else {
Matt Carlsonb6080e12009-09-01 13:12:00 +00008381 tw32(HOSTCC_RXCOL_TICKS, 0);
8382 tw32(HOSTCC_RXMAX_FRAMES, 0);
8383 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
David S. Miller15f98502005-05-18 22:49:26 -07008384 }
Matt Carlsonb6080e12009-09-01 13:12:00 +00008385
Michael Chana489b6d2012-09-28 07:12:39 +00008386 for (; i < limit; i++) {
8387 u32 reg;
8388
8389 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8390 tw32(reg, ec->rx_coalesce_usecs);
8391 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8392 tw32(reg, ec->rx_max_coalesced_frames);
8393 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8394 tw32(reg, ec->rx_max_coalesced_frames_irq);
8395 }
8396
8397 for (; i < tp->irq_max - 1; i++) {
8398 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8399 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8400 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8401 }
8402}
8403
8404static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8405{
8406 tg3_coal_tx_init(tp, ec);
8407 tg3_coal_rx_init(tp, ec);
8408
Joe Perches63c3a662011-04-26 08:12:10 +00008409 if (!tg3_flag(tp, 5705_PLUS)) {
David S. Miller15f98502005-05-18 22:49:26 -07008410 u32 val = ec->stats_block_coalesce_usecs;
8411
Matt Carlsonb6080e12009-09-01 13:12:00 +00008412 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8413 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8414
David S. Miller15f98502005-05-18 22:49:26 -07008415 if (!netif_carrier_ok(tp->dev))
8416 val = 0;
8417
8418 tw32(HOSTCC_STAT_COAL_TICKS, val);
8419 }
8420}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008421
/* tp->lock is held. */
/* Reset all of the chip's send and receive rings to a clean post-reset
 * state: disable every ring control block except the first of each kind,
 * zero all host/NIC mailboxes and cached producer/consumer indices, clear
 * the per-vector status blocks in host RAM, and then re-program the BD
 * info blocks and status-block DMA addresses for each active tg3_napi
 * vector.  Called from the hardware (re)initialization path.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	/* The number of send ring control blocks present in NIC SRAM
	 * varies by chip family, so pick the matching upper bound.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	/* As above, the receive-return RCB count is chip-dependent. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	/* Writing 1 to the interrupt mailbox masks the vector; also clear
	 * the cached indices the MSI-workaround polling compares against.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* Per-vector TX producer mailboxes only exist
			 * when TSS (multi TX queue) is enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the single TX producer mbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Re-enable the first TX / RX-return rings for vector 0. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Program the remaining vectors' status blocks and rings; the
	 * per-vector status-block register pairs start at RING1 and are
	 * 8 bytes apart.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8549
/* Program the receive BD replenish thresholds for the standard and
 * (when supported) jumbo producer rings.  The thresholds are derived
 * from the per-chip on-die BD cache size and the current host ring
 * sizes, taking the more conservative (smaller) of the two limits.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard-ring BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* NIC-side limit: half the cache, capped by the max post count.
	 * Host-side limit: one eighth of the configured ring, minimum 1.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* No jumbo ring on non-jumbo-capable or 5780-class parts. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8588
Matt Carlsonccd5ba92012-02-13 10:20:08 +00008589static inline u32 calc_crc(unsigned char *buf, int len)
8590{
8591 u32 reg;
8592 u32 tmp;
8593 int j, k;
8594
8595 reg = 0xffffffff;
8596
8597 for (j = 0; j < len; j++) {
8598 reg ^= buf[j];
8599
8600 for (k = 0; k < 8; k++) {
8601 tmp = reg & 0x01;
8602
8603 reg >>= 1;
8604
8605 if (tmp)
8606 reg ^= 0xedb88320;
8607 }
8608 }
8609
8610 return ~reg;
8611}
8612
8613static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8614{
8615 /* accept or reject all multicast frames */
8616 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8617 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8618 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8619 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8620}
8621
8622static void __tg3_set_rx_mode(struct net_device *dev)
8623{
8624 struct tg3 *tp = netdev_priv(dev);
8625 u32 rx_mode;
8626
8627 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8628 RX_MODE_KEEP_VLAN_TAG);
8629
8630#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8631 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8632 * flag clear.
8633 */
8634 if (!tg3_flag(tp, ENABLE_ASF))
8635 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8636#endif
8637
8638 if (dev->flags & IFF_PROMISC) {
8639 /* Promiscuous mode. */
8640 rx_mode |= RX_MODE_PROMISC;
8641 } else if (dev->flags & IFF_ALLMULTI) {
8642 /* Accept all multicast. */
8643 tg3_set_multi(tp, 1);
8644 } else if (netdev_mc_empty(dev)) {
8645 /* Reject all multicast. */
8646 tg3_set_multi(tp, 0);
8647 } else {
8648 /* Accept one or more multicast(s). */
8649 struct netdev_hw_addr *ha;
8650 u32 mc_filter[4] = { 0, };
8651 u32 regidx;
8652 u32 bit;
8653 u32 crc;
8654
8655 netdev_for_each_mc_addr(ha, dev) {
8656 crc = calc_crc(ha->addr, ETH_ALEN);
8657 bit = ~crc & 0x7f;
8658 regidx = (bit & 0x60) >> 5;
8659 bit &= 0x1f;
8660 mc_filter[regidx] |= (1 << bit);
8661 }
8662
8663 tw32(MAC_HASH_REG_0, mc_filter[0]);
8664 tw32(MAC_HASH_REG_1, mc_filter[1]);
8665 tw32(MAC_HASH_REG_2, mc_filter[2]);
8666 tw32(MAC_HASH_REG_3, mc_filter[3]);
8667 }
8668
8669 if (rx_mode != tp->rx_mode) {
8670 tp->rx_mode = rx_mode;
8671 tw32_f(MAC_RX_MODE, rx_mode);
8672 udelay(10);
8673 }
8674}
8675
Michael Chan91024262012-09-28 07:12:38 +00008676static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
Matt Carlson90415472011-12-16 13:33:23 +00008677{
8678 int i;
8679
8680 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
Michael Chan91024262012-09-28 07:12:38 +00008681 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
Matt Carlson90415472011-12-16 13:33:23 +00008682}
8683
8684static void tg3_rss_check_indir_tbl(struct tg3 *tp)
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008685{
8686 int i;
8687
8688 if (!tg3_flag(tp, SUPPORT_MSIX))
8689 return;
8690
Matt Carlson90415472011-12-16 13:33:23 +00008691 if (tp->irq_cnt <= 2) {
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008692 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
Matt Carlson90415472011-12-16 13:33:23 +00008693 return;
8694 }
8695
8696 /* Validate table against current IRQ count */
8697 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8698 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8699 break;
8700 }
8701
8702 if (i != TG3_RSS_INDIR_TBL_SIZE)
Michael Chan91024262012-09-28 07:12:38 +00008703 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008704}
8705
Matt Carlson90415472011-12-16 13:33:23 +00008706static void tg3_rss_write_indir_tbl(struct tg3 *tp)
Matt Carlsonbcebcc42011-12-14 11:10:01 +00008707{
8708 int i = 0;
8709 u32 reg = MAC_RSS_INDIR_TBL_0;
8710
8711 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8712 u32 val = tp->rss_ind_tbl[i];
8713 i++;
8714 for (; i % 8; i++) {
8715 val <<= 4;
8716 val |= tp->rss_ind_tbl[i];
8717 }
8718 tw32(reg, val);
8719 reg += 4;
8720 }
8721}
8722
Matt Carlson2d31eca2009-09-01 12:53:31 +00008723/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008724static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008725{
8726 u32 val, rdmac_mode;
8727 int i, err, limit;
Matt Carlson8fea32b2010-09-15 08:59:58 +00008728 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008729
8730 tg3_disable_ints(tp);
8731
8732 tg3_stop_fw(tp);
8733
8734 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8735
Joe Perches63c3a662011-04-26 08:12:10 +00008736 if (tg3_flag(tp, INIT_COMPLETE))
Michael Chane6de8ad2005-05-05 14:42:41 -07008737 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738
Matt Carlson699c0192010-12-06 08:28:51 +00008739 /* Enable MAC control of LPI */
8740 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8741 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8742 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8743 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8744
8745 tw32_f(TG3_CPMU_EEE_CTRL,
8746 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8747
Matt Carlsona386b902010-12-06 08:28:53 +00008748 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8749 TG3_CPMU_EEEMD_LPI_IN_TX |
8750 TG3_CPMU_EEEMD_LPI_IN_RX |
8751 TG3_CPMU_EEEMD_EEE_ENABLE;
8752
8753 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8754 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8755
Joe Perches63c3a662011-04-26 08:12:10 +00008756 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsona386b902010-12-06 08:28:53 +00008757 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8758
8759 tw32_f(TG3_CPMU_EEE_MODE, val);
8760
8761 tw32_f(TG3_CPMU_EEE_DBTMR1,
8762 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8763 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8764
8765 tw32_f(TG3_CPMU_EEE_DBTMR2,
Matt Carlsond7f2ab22011-01-25 15:58:56 +00008766 TG3_CPMU_DBTMR2_APE_TX_2047US |
Matt Carlsona386b902010-12-06 08:28:53 +00008767 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
Matt Carlson699c0192010-12-06 08:28:51 +00008768 }
8769
Matt Carlson603f1172010-02-12 14:47:10 +00008770 if (reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08008771 tg3_phy_reset(tp);
8772
Linus Torvalds1da177e2005-04-16 15:20:36 -07008773 err = tg3_chip_reset(tp);
8774 if (err)
8775 return err;
8776
8777 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8778
Matt Carlsonbcb37f62008-11-03 16:52:09 -08008779 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07008780 val = tr32(TG3_CPMU_CTRL);
8781 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8782 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08008783
8784 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8785 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8786 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8787 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8788
8789 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8790 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8791 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8792 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8793
8794 val = tr32(TG3_CPMU_HST_ACC);
8795 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8796 val |= CPMU_HST_ACC_MACCLK_6_25;
8797 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07008798 }
8799
Matt Carlson33466d92009-04-20 06:57:41 +00008800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8801 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8802 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8803 PCIE_PWR_MGMT_L1_THRESH_4MS;
8804 tw32(PCIE_PWR_MGMT_THRESH, val);
Matt Carlson521e6b92009-08-25 10:06:01 +00008805
8806 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8807 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8808
8809 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
Matt Carlson33466d92009-04-20 06:57:41 +00008810
Matt Carlsonf40386c2009-11-02 14:24:02 +00008811 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8812 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
Matt Carlson255ca312009-08-25 10:07:27 +00008813 }
8814
Joe Perches63c3a662011-04-26 08:12:10 +00008815 if (tg3_flag(tp, L1PLLPD_EN)) {
Matt Carlson614b0592010-01-20 16:58:02 +00008816 u32 grc_mode = tr32(GRC_MODE);
8817
8818 /* Access the lower 1K of PL PCIE block registers. */
8819 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8820 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8821
8822 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8823 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8824 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8825
8826 tw32(GRC_MODE, grc_mode);
8827 }
8828
Matt Carlson55086ad2011-12-14 11:09:59 +00008829 if (tg3_flag(tp, 57765_CLASS)) {
Matt Carlson5093eed2010-11-24 08:31:45 +00008830 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8831 u32 grc_mode = tr32(GRC_MODE);
Matt Carlsoncea46462010-04-12 06:58:24 +00008832
Matt Carlson5093eed2010-11-24 08:31:45 +00008833 /* Access the lower 1K of PL PCIE block registers. */
8834 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8835 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
Matt Carlsoncea46462010-04-12 06:58:24 +00008836
Matt Carlson5093eed2010-11-24 08:31:45 +00008837 val = tr32(TG3_PCIE_TLDLPL_PORT +
8838 TG3_PCIE_PL_LO_PHYCTL5);
8839 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8840 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
Matt Carlsoncea46462010-04-12 06:58:24 +00008841
Matt Carlson5093eed2010-11-24 08:31:45 +00008842 tw32(GRC_MODE, grc_mode);
8843 }
Matt Carlsona977dbe2010-04-12 06:58:26 +00008844
Matt Carlson1ff30a52011-05-19 12:12:46 +00008845 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8846 u32 grc_mode = tr32(GRC_MODE);
8847
8848 /* Access the lower 1K of DL PCIE block registers. */
8849 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8850 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8851
8852 val = tr32(TG3_PCIE_TLDLPL_PORT +
8853 TG3_PCIE_DL_LO_FTSMAX);
8854 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8855 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8856 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8857
8858 tw32(GRC_MODE, grc_mode);
8859 }
8860
Matt Carlsona977dbe2010-04-12 06:58:26 +00008861 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8862 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8863 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8864 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
Matt Carlsoncea46462010-04-12 06:58:24 +00008865 }
8866
Linus Torvalds1da177e2005-04-16 15:20:36 -07008867 /* This works around an issue with Athlon chipsets on
8868 * B3 tigon3 silicon. This bit has no effect on any
8869 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07008870 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008871 */
Joe Perches63c3a662011-04-26 08:12:10 +00008872 if (!tg3_flag(tp, CPMU_PRESENT)) {
8873 if (!tg3_flag(tp, PCI_EXPRESS))
Matt Carlson795d01c2007-10-07 23:28:17 -07008874 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8875 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8876 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008877
8878 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
Joe Perches63c3a662011-04-26 08:12:10 +00008879 tg3_flag(tp, PCIX_MODE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008880 val = tr32(TG3PCI_PCISTATE);
8881 val |= PCISTATE_RETRY_SAME_DMA;
8882 tw32(TG3PCI_PCISTATE, val);
8883 }
8884
Joe Perches63c3a662011-04-26 08:12:10 +00008885 if (tg3_flag(tp, ENABLE_APE)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -07008886 /* Allow reads and writes to the
8887 * APE register and memory space.
8888 */
8889 val = tr32(TG3PCI_PCISTATE);
8890 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
Matt Carlsonf92d9dc12010-06-05 17:24:30 +00008891 PCISTATE_ALLOW_APE_SHMEM_WR |
8892 PCISTATE_ALLOW_APE_PSPACE_WR;
Matt Carlson0d3031d2007-10-10 18:02:43 -07008893 tw32(TG3PCI_PCISTATE, val);
8894 }
8895
Linus Torvalds1da177e2005-04-16 15:20:36 -07008896 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8897 /* Enable some hw fixes. */
8898 val = tr32(TG3PCI_MSI_DATA);
8899 val |= (1 << 26) | (1 << 28) | (1 << 29);
8900 tw32(TG3PCI_MSI_DATA, val);
8901 }
8902
8903 /* Descriptor ring init may make accesses to the
8904 * NIC SRAM area to setup the TX descriptors, so we
8905 * can only do this after the hardware has been
8906 * successfully reset.
8907 */
Michael Chan32d8c572006-07-25 16:38:29 -07008908 err = tg3_init_rings(tp);
8909 if (err)
8910 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008911
Joe Perches63c3a662011-04-26 08:12:10 +00008912 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsoncbf9ca62009-11-13 13:03:40 +00008913 val = tr32(TG3PCI_DMA_RW_CTRL) &
8914 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
Matt Carlson1a319022010-04-12 06:58:25 +00008915 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8916 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
Matt Carlson55086ad2011-12-14 11:09:59 +00008917 if (!tg3_flag(tp, 57765_CLASS) &&
Matt Carlson0aebff42011-04-25 12:42:45 +00008918 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8919 val |= DMA_RWCTRL_TAGGED_STAT_WA;
Matt Carlsoncbf9ca62009-11-13 13:03:40 +00008920 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8921 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8922 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07008923 /* This value is determined during the probe time DMA
8924 * engine test, tg3_test_dma.
8925 */
8926 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8927 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008928
8929 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8930 GRC_MODE_4X_NIC_SEND_RINGS |
8931 GRC_MODE_NO_TX_PHDR_CSUM |
8932 GRC_MODE_NO_RX_PHDR_CSUM);
8933 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07008934
8935 /* Pseudo-header checksum is done by hardware logic and not
8936 * the offload processers, so make the chip do the pseudo-
8937 * header checksums on receive. For transmit it is more
8938 * convenient to do the pseudo-header checksum in software
8939 * as Linux does that on transmit for us in all cases.
8940 */
8941 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008942
8943 tw32(GRC_MODE,
8944 tp->grc_mode |
8945 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8946
8947 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8948 val = tr32(GRC_MISC_CFG);
8949 val &= ~0xff;
8950 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8951 tw32(GRC_MISC_CFG, val);
8952
8953 /* Initialize MBUF/DESC pool. */
Joe Perches63c3a662011-04-26 08:12:10 +00008954 if (tg3_flag(tp, 5750_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008955 /* Do nothing. */
8956 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8957 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8959 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8960 else
8961 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8962 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8963 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
Joe Perches63c3a662011-04-26 08:12:10 +00008964 } else if (tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008965 int fw_len;
8966
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -08008967 fw_len = tp->fw_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008968 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8969 tw32(BUFMGR_MB_POOL_ADDR,
8970 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8971 tw32(BUFMGR_MB_POOL_SIZE,
8972 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008974
Michael Chan0f893dc2005-07-25 12:30:38 -07008975 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008976 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8977 tp->bufmgr_config.mbuf_read_dma_low_water);
8978 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8979 tp->bufmgr_config.mbuf_mac_rx_low_water);
8980 tw32(BUFMGR_MB_HIGH_WATER,
8981 tp->bufmgr_config.mbuf_high_water);
8982 } else {
8983 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8984 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8985 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8986 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8987 tw32(BUFMGR_MB_HIGH_WATER,
8988 tp->bufmgr_config.mbuf_high_water_jumbo);
8989 }
8990 tw32(BUFMGR_DMA_LOW_WATER,
8991 tp->bufmgr_config.dma_low_water);
8992 tw32(BUFMGR_DMA_HIGH_WATER,
8993 tp->bufmgr_config.dma_high_water);
8994
Matt Carlsond309a462010-09-30 10:34:31 +00008995 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8997 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
Matt Carlson4d958472011-04-20 07:57:35 +00008998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8999 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9000 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9001 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
Matt Carlsond309a462010-09-30 10:34:31 +00009002 tw32(BUFMGR_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009003 for (i = 0; i < 2000; i++) {
9004 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9005 break;
9006 udelay(10);
9007 }
9008 if (i >= 2000) {
Joe Perches05dbe002010-02-17 19:44:19 +00009009 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009010 return -ENODEV;
9011 }
9012
Matt Carlsoneb07a942011-04-20 07:57:36 +00009013 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9014 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
Michael Chanb5d37722006-09-27 16:06:21 -07009015
Matt Carlsoneb07a942011-04-20 07:57:36 +00009016 tg3_setup_rxbd_thresholds(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009017
9018 /* Initialize TG3_BDINFO's at:
9019 * RCVDBDI_STD_BD: standard eth size rx ring
9020 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9021 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9022 *
9023 * like so:
9024 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9025 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9026 * ring attribute flags
9027 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9028 *
9029 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9030 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9031 *
9032 * The size of each ring is fixed in the firmware, but the location is
9033 * configurable.
9034 */
9035 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
Matt Carlson21f581a2009-08-28 14:00:25 +00009036 ((u64) tpr->rx_std_mapping >> 32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009037 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
Matt Carlson21f581a2009-08-28 14:00:25 +00009038 ((u64) tpr->rx_std_mapping & 0xffffffff));
Joe Perches63c3a662011-04-26 08:12:10 +00009039 if (!tg3_flag(tp, 5717_PLUS))
Matt Carlson87668d32009-11-13 13:03:34 +00009040 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9041 NIC_SRAM_RX_BUFFER_DESC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009042
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009043 /* Disable the mini ring */
Joe Perches63c3a662011-04-26 08:12:10 +00009044 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009045 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9046 BDINFO_FLAGS_DISABLED);
9047
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009048 /* Program the jumbo buffer descriptor ring control
9049 * blocks on those devices that have them.
9050 */
Matt Carlsona0512942011-07-27 14:20:54 +00009051 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
Joe Perches63c3a662011-04-26 08:12:10 +00009052 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009053
Joe Perches63c3a662011-04-26 08:12:10 +00009054 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009055 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
Matt Carlson21f581a2009-08-28 14:00:25 +00009056 ((u64) tpr->rx_jmb_mapping >> 32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009057 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
Matt Carlson21f581a2009-08-28 14:00:25 +00009058 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
Matt Carlsonde9f5232011-04-05 14:22:43 +00009059 val = TG3_RX_JMB_RING_SIZE(tp) <<
9060 BDINFO_FLAGS_MAXLEN_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009061 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
Matt Carlsonde9f5232011-04-05 14:22:43 +00009062 val | BDINFO_FLAGS_USE_EXT_RECV);
Joe Perches63c3a662011-04-26 08:12:10 +00009063 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
Matt Carlson55086ad2011-12-14 11:09:59 +00009064 tg3_flag(tp, 57765_CLASS))
Matt Carlson87668d32009-11-13 13:03:34 +00009065 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9066 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009067 } else {
9068 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9069 BDINFO_FLAGS_DISABLED);
9070 }
9071
Joe Perches63c3a662011-04-26 08:12:10 +00009072 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsonfa6b2aa2011-11-21 15:01:19 +00009073 val = TG3_RX_STD_RING_SIZE(tp);
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009074 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9075 val |= (TG3_RX_STD_DMA_SZ << 2);
9076 } else
Matt Carlson04380d42010-04-12 06:58:29 +00009077 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009078 } else
Matt Carlsonde9f5232011-04-05 14:22:43 +00009079 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009080
9081 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009082
Matt Carlson411da642009-11-13 13:03:46 +00009083 tpr->rx_std_prod_idx = tp->rx_pending;
Matt Carlson66711e62009-11-13 13:03:49 +00009084 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009085
Joe Perches63c3a662011-04-26 08:12:10 +00009086 tpr->rx_jmb_prod_idx =
9087 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
Matt Carlson66711e62009-11-13 13:03:49 +00009088 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089
Matt Carlson2d31eca2009-09-01 12:53:31 +00009090 tg3_rings_reset(tp);
9091
Linus Torvalds1da177e2005-04-16 15:20:36 -07009092 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07009093 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009094
9095 /* MTU + ethernet header + FCS + optional VLAN tag */
Matt Carlsonf7b493e2009-02-25 14:21:52 +00009096 tw32(MAC_RX_MTU_SIZE,
9097 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009098
9099 /* The slot time is changed by tg3_setup_phy if we
9100 * run at gigabit with half duplex.
9101 */
Matt Carlsonf2096f92011-04-05 14:22:48 +00009102 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9103 (6 << TX_LENGTHS_IPG_SHIFT) |
9104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9105
9106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9107 val |= tr32(MAC_TX_LENGTHS) &
9108 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9109 TX_LENGTHS_CNT_DWN_VAL_MSK);
9110
9111 tw32(MAC_TX_LENGTHS, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009112
9113 /* Receive rules. */
9114 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9115 tw32(RCVLPC_CONFIG, 0x0181);
9116
9117 /* Calculate RDMAC_MODE setting early, we need it to determine
9118 * the RCVLPC_STATE_ENABLE mask.
9119 */
9120 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9121 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9122 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9123 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9124 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07009125
Matt Carlsondeabaac2010-11-24 08:31:50 +00009126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
Matt Carlson0339e4e2010-02-12 14:47:09 +00009127 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9128
Matt Carlson57e69832008-05-25 23:48:31 -07009129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson321d32a2008-11-21 17:22:19 -08009130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
Matt Carlsond30cdd22007-10-07 23:28:35 -07009132 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9133 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9134 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9135
Matt Carlsonc5908932011-03-09 16:58:25 +00009136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9137 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +00009138 if (tg3_flag(tp, TSO_CAPABLE) &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07009139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009140 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9141 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009142 !tg3_flag(tp, IS_5788)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009143 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9144 }
9145 }
9146
Joe Perches63c3a662011-04-26 08:12:10 +00009147 if (tg3_flag(tp, PCI_EXPRESS))
Michael Chan85e94ce2005-04-21 17:05:28 -07009148 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9149
Joe Perches63c3a662011-04-26 08:12:10 +00009150 if (tg3_flag(tp, HW_TSO_1) ||
9151 tg3_flag(tp, HW_TSO_2) ||
9152 tg3_flag(tp, HW_TSO_3))
Matt Carlson027455a2008-12-21 20:19:30 -08009153 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9154
Matt Carlson108a6c12011-05-19 12:12:47 +00009155 if (tg3_flag(tp, 57765_PLUS) ||
Matt Carlsone849cdc2009-11-13 13:03:38 +00009156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Matt Carlson027455a2008-12-21 20:19:30 -08009157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9158 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009159
Matt Carlsonf2096f92011-04-05 14:22:48 +00009160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9161 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9162
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
Joe Perches63c3a662011-04-26 08:12:10 +00009167 tg3_flag(tp, 57765_PLUS)) {
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009168 val = tr32(TG3_RDMA_RSRVCTRL_REG);
Michael Chan10ce95d2012-07-29 19:15:42 +00009169 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
Matt Carlsonb4495ed2011-01-25 15:58:47 +00009170 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9171 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9172 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9173 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9174 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9175 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
Matt Carlsonb75cc0e2010-11-24 08:31:46 +00009176 }
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009177 tw32(TG3_RDMA_RSRVCTRL_REG,
9178 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9179 }
9180
Matt Carlsond78b59f2011-04-05 14:22:46 +00009181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
Matt Carlsond309a462010-09-30 10:34:31 +00009183 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9184 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9185 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9186 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9187 }
9188
Linus Torvalds1da177e2005-04-16 15:20:36 -07009189 /* Receive/send statistics. */
Joe Perches63c3a662011-04-26 08:12:10 +00009190 if (tg3_flag(tp, 5750_PLUS)) {
Michael Chan16613942006-06-29 20:15:13 -07009191 val = tr32(RCVLPC_STATS_ENABLE);
9192 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9193 tw32(RCVLPC_STATS_ENABLE, val);
9194 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009195 tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009196 val = tr32(RCVLPC_STATS_ENABLE);
9197 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9198 tw32(RCVLPC_STATS_ENABLE, val);
9199 } else {
9200 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9201 }
9202 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9203 tw32(SNDDATAI_STATSENAB, 0xffffff);
9204 tw32(SNDDATAI_STATSCTRL,
9205 (SNDDATAI_SCTRL_ENABLE |
9206 SNDDATAI_SCTRL_FASTUPD));
9207
9208 /* Setup host coalescing engine. */
9209 tw32(HOSTCC_MODE, 0);
9210 for (i = 0; i < 2000; i++) {
9211 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9212 break;
9213 udelay(10);
9214 }
9215
Michael Chand244c892005-07-05 14:42:33 -07009216 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009217
Joe Perches63c3a662011-04-26 08:12:10 +00009218 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009219 /* Status/statistics block address. See tg3_timer,
9220 * the tg3_periodic_fetch_stats call there, and
9221 * tg3_get_stats to see how this works for 5705/5750 chips.
9222 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009223 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9224 ((u64) tp->stats_mapping >> 32));
9225 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9226 ((u64) tp->stats_mapping & 0xffffffff));
9227 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
Matt Carlson2d31eca2009-09-01 12:53:31 +00009228
Linus Torvalds1da177e2005-04-16 15:20:36 -07009229 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
Matt Carlson2d31eca2009-09-01 12:53:31 +00009230
9231 /* Clear statistics and status block memory areas */
9232 for (i = NIC_SRAM_STATS_BLK;
9233 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9234 i += sizeof(u32)) {
9235 tg3_write_mem(tp, i, 0);
9236 udelay(40);
9237 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009238 }
9239
9240 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9241
9242 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9243 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009244 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009245 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9246
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009247 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9248 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
Michael Chanc94e3942005-09-27 12:12:42 -07009249 /* reset to prevent losing 1st rx packet intermittently */
9250 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9251 udelay(10);
9252 }
9253
Matt Carlson3bda1252008-08-15 14:08:22 -07009254 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Matt Carlson9e975cc2011-07-20 10:20:50 +00009255 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9256 MAC_MODE_FHDE_ENABLE;
9257 if (tg3_flag(tp, ENABLE_APE))
9258 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
Joe Perches63c3a662011-04-26 08:12:10 +00009259 if (!tg3_flag(tp, 5705_PLUS) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009260 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009261 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9262 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009263 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9264 udelay(40);
9265
Michael Chan314fba32005-04-21 17:07:04 -07009266 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Joe Perches63c3a662011-04-26 08:12:10 +00009267 * If TG3_FLAG_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07009268 * register to preserve the GPIO settings for LOMs. The GPIOs,
9269 * whether used as inputs or outputs, are set by boot code after
9270 * reset.
9271 */
Joe Perches63c3a662011-04-26 08:12:10 +00009272 if (!tg3_flag(tp, IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07009273 u32 gpio_mask;
9274
Michael Chan9d26e212006-12-07 00:21:14 -08009275 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9276 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9277 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07009278
9279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9280 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9281 GRC_LCLCTRL_GPIO_OUTPUT3;
9282
Michael Chanaf36e6b2006-03-23 01:28:06 -08009283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9284 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9285
Gary Zambranoaaf84462007-05-05 11:51:45 -07009286 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07009287 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9288
9289 /* GPIO1 must be driven high for eeprom write protect */
Joe Perches63c3a662011-04-26 08:12:10 +00009290 if (tg3_flag(tp, EEPROM_WRITE_PROT))
Michael Chan9d26e212006-12-07 00:21:14 -08009291 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9292 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07009293 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009294 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9295 udelay(100);
9296
Matt Carlsonc3b50032012-01-17 15:27:23 +00009297 if (tg3_flag(tp, USING_MSIX)) {
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009298 val = tr32(MSGINT_MODE);
Matt Carlsonc3b50032012-01-17 15:27:23 +00009299 val |= MSGINT_MODE_ENABLE;
9300 if (tp->irq_cnt > 1)
9301 val |= MSGINT_MODE_MULTIVEC_EN;
Matt Carlson5b39de92011-08-31 11:44:50 +00009302 if (!tg3_flag(tp, 1SHOT_MSI))
9303 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009304 tw32(MSGINT_MODE, val);
9305 }
9306
Joe Perches63c3a662011-04-26 08:12:10 +00009307 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009308 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9309 udelay(40);
9310 }
9311
9312 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9313 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9314 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9315 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9316 WDMAC_MODE_LNGREAD_ENAB);
9317
Matt Carlsonc5908932011-03-09 16:58:25 +00009318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9319 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +00009320 if (tg3_flag(tp, TSO_CAPABLE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009321 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9322 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9323 /* nothing */
9324 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009325 !tg3_flag(tp, IS_5788)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009326 val |= WDMAC_MODE_RX_ACCEL;
9327 }
9328 }
9329
Michael Chand9ab5ad2006-03-20 22:27:35 -08009330 /* Enable host coalescing bug fix */
Joe Perches63c3a662011-04-26 08:12:10 +00009331 if (tg3_flag(tp, 5755_PLUS))
Matt Carlsonf51f3562008-05-25 23:45:08 -07009332 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08009333
Matt Carlson788a0352009-11-02 14:26:03 +00009334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9335 val |= WDMAC_MODE_BURST_ALL_DATA;
9336
Linus Torvalds1da177e2005-04-16 15:20:36 -07009337 tw32_f(WDMAC_MODE, val);
9338 udelay(40);
9339
Joe Perches63c3a662011-04-26 08:12:10 +00009340 if (tg3_flag(tp, PCIX_MODE)) {
Matt Carlson9974a352007-10-07 23:27:28 -07009341 u16 pcix_cmd;
9342
9343 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9344 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07009346 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9347 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009348 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07009349 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9350 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009351 }
Matt Carlson9974a352007-10-07 23:27:28 -07009352 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9353 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009354 }
9355
9356 tw32_f(RDMAC_MODE, rdmac_mode);
9357 udelay(40);
9358
Michael Chan091f0ea2012-07-29 19:15:43 +00009359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9360 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9361 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9362 break;
9363 }
9364 if (i < TG3_NUM_RDMA_CHANNELS) {
9365 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9366 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9367 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9368 tg3_flag_set(tp, 5719_RDMA_BUG);
9369 }
9370 }
9371
Linus Torvalds1da177e2005-04-16 15:20:36 -07009372 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009373 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009374 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07009375
9376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9377 tw32(SNDDATAC_MODE,
9378 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9379 else
9380 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9381
Linus Torvalds1da177e2005-04-16 15:20:36 -07009382 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9383 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009384 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
Joe Perches63c3a662011-04-26 08:12:10 +00009385 if (tg3_flag(tp, LRG_PROD_RING_CAP))
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009386 val |= RCVDBDI_MODE_LRG_RING_SZ;
9387 tw32(RCVDBDI_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009388 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009389 if (tg3_flag(tp, HW_TSO_1) ||
9390 tg3_flag(tp, HW_TSO_2) ||
9391 tg3_flag(tp, HW_TSO_3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009392 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009393 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00009394 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009395 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9396 tw32(SNDBDI_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009397 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9398
9399 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9400 err = tg3_load_5701_a0_firmware_fix(tp);
9401 if (err)
9402 return err;
9403 }
9404
Joe Perches63c3a662011-04-26 08:12:10 +00009405 if (tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009406 err = tg3_load_tso_firmware(tp);
9407 if (err)
9408 return err;
9409 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009410
9411 tp->tx_mode = TX_MODE_ENABLE;
Matt Carlsonf2096f92011-04-05 14:22:48 +00009412
Joe Perches63c3a662011-04-26 08:12:10 +00009413 if (tg3_flag(tp, 5755_PLUS) ||
Matt Carlsonb1d05212010-06-05 17:24:31 +00009414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9415 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
Matt Carlsonf2096f92011-04-05 14:22:48 +00009416
9417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9418 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9419 tp->tx_mode &= ~val;
9420 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9421 }
9422
Linus Torvalds1da177e2005-04-16 15:20:36 -07009423 tw32_f(MAC_TX_MODE, tp->tx_mode);
9424 udelay(100);
9425
Joe Perches63c3a662011-04-26 08:12:10 +00009426 if (tg3_flag(tp, ENABLE_RSS)) {
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009427 tg3_rss_write_indir_tbl(tp);
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009428
9429 /* Setup the "secret" hash key. */
9430 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9431 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9432 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9433 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9434 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9435 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9436 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9437 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9438 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9439 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9440 }
9441
Linus Torvalds1da177e2005-04-16 15:20:36 -07009442 tp->rx_mode = RX_MODE_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00009443 if (tg3_flag(tp, 5755_PLUS))
Michael Chanaf36e6b2006-03-23 01:28:06 -08009444 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9445
Joe Perches63c3a662011-04-26 08:12:10 +00009446 if (tg3_flag(tp, ENABLE_RSS))
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009447 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9448 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9449 RX_MODE_RSS_IPV6_HASH_EN |
9450 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9451 RX_MODE_RSS_IPV4_HASH_EN |
9452 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9453
Linus Torvalds1da177e2005-04-16 15:20:36 -07009454 tw32_f(MAC_RX_MODE, tp->rx_mode);
9455 udelay(10);
9456
Linus Torvalds1da177e2005-04-16 15:20:36 -07009457 tw32(MAC_LED_CTRL, tp->led_ctrl);
9458
9459 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009460 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009461 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9462 udelay(10);
9463 }
9464 tw32_f(MAC_RX_MODE, tp->rx_mode);
9465 udelay(10);
9466
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009467 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009469 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009470 /* Set drive transmission level to 1.2V */
9471 /* only if the signal pre-emphasis bit is not set */
9472 val = tr32(MAC_SERDES_CFG);
9473 val &= 0xfffff000;
9474 val |= 0x880;
9475 tw32(MAC_SERDES_CFG, val);
9476 }
9477 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9478 tw32(MAC_SERDES_CFG, 0x616000);
9479 }
9480
9481 /* Prevent chip from dropping frames when flow control
9482 * is enabled.
9483 */
Matt Carlson55086ad2011-12-14 11:09:59 +00009484 if (tg3_flag(tp, 57765_CLASS))
Matt Carlson666bc832010-01-20 16:58:03 +00009485 val = 1;
9486 else
9487 val = 2;
9488 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009489
9490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009491 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009492 /* Use hardware link auto-negotiation */
Joe Perches63c3a662011-04-26 08:12:10 +00009493 tg3_flag_set(tp, HW_AUTONEG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009494 }
9495
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009496 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
Matt Carlson6ff6f812011-05-19 12:12:54 +00009497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
Michael Chand4d2c552006-03-20 17:47:20 -08009498 u32 tmp;
9499
9500 tmp = tr32(SERDES_RX_CTRL);
9501 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9502 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9503 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9504 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9505 }
9506
Joe Perches63c3a662011-04-26 08:12:10 +00009507 if (!tg3_flag(tp, USE_PHYLIB)) {
Matt Carlsonc6700ce2012-02-13 15:20:15 +00009508 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
Matt Carlson800960682010-08-02 11:26:06 +00009509 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009510
Matt Carlsondd477002008-05-25 23:45:58 -07009511 err = tg3_setup_phy(tp, 0);
9512 if (err)
9513 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009514
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009515 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9516 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
Matt Carlsondd477002008-05-25 23:45:58 -07009517 u32 tmp;
9518
9519 /* Clear CRC stats. */
9520 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9521 tg3_writephy(tp, MII_TG3_TEST1,
9522 tmp | MII_TG3_TEST1_CRC_EN);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00009523 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
Matt Carlsondd477002008-05-25 23:45:58 -07009524 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009525 }
9526 }
9527
9528 __tg3_set_rx_mode(tp->dev);
9529
9530 /* Initialize receive rules. */
9531 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9532 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9533 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9534 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9535
Joe Perches63c3a662011-04-26 08:12:10 +00009536 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009537 limit = 8;
9538 else
9539 limit = 16;
Joe Perches63c3a662011-04-26 08:12:10 +00009540 if (tg3_flag(tp, ENABLE_ASF))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009541 limit -= 4;
9542 switch (limit) {
9543 case 16:
9544 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9545 case 15:
9546 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9547 case 14:
9548 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9549 case 13:
9550 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9551 case 12:
9552 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9553 case 11:
9554 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9555 case 10:
9556 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9557 case 9:
9558 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9559 case 8:
9560 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9561 case 7:
9562 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9563 case 6:
9564 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9565 case 5:
9566 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9567 case 4:
9568 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9569 case 3:
9570 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9571 case 2:
9572 case 1:
9573
9574 default:
9575 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07009576 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009577
Joe Perches63c3a662011-04-26 08:12:10 +00009578 if (tg3_flag(tp, ENABLE_APE))
Matt Carlson9ce768e2007-10-11 19:49:11 -07009579 /* Write our heartbeat update interval to APE. */
9580 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9581 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07009582
Linus Torvalds1da177e2005-04-16 15:20:36 -07009583 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9584
Linus Torvalds1da177e2005-04-16 15:20:36 -07009585 return 0;
9586}
9587
9588/* Called at device open time to get the chip ready for
9589 * packet processing. Invoked with tp->lock held.
9590 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07009591static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009592{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009593 tg3_switch_clocks(tp);
9594
9595 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9596
Matt Carlson2f751b62008-08-04 23:17:34 -07009597 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009598}
9599
Michael Chanaed93e02012-07-16 16:24:02 +00009600static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9601{
9602 int i;
9603
9604 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9605 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9606
9607 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9608 off += len;
9609
9610 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9611 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9612 memset(ocir, 0, TG3_OCIR_LEN);
9613 }
9614}
9615
9616/* sysfs attributes for hwmon */
9617static ssize_t tg3_show_temp(struct device *dev,
9618 struct device_attribute *devattr, char *buf)
9619{
9620 struct pci_dev *pdev = to_pci_dev(dev);
9621 struct net_device *netdev = pci_get_drvdata(pdev);
9622 struct tg3 *tp = netdev_priv(netdev);
9623 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9624 u32 temperature;
9625
9626 spin_lock_bh(&tp->lock);
9627 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9628 sizeof(temperature));
9629 spin_unlock_bh(&tp->lock);
9630 return sprintf(buf, "%u\n", temperature);
9631}
9632
9633
/* One hwmon attribute per temperature record; the trailing index is
 * the APE scratchpad offset handed to tg3_show_temp().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute list wired into the group below. */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

/* Sysfs group registered/removed by tg3_hwmon_open()/tg3_hwmon_close(). */
static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
9651
Michael Chanaed93e02012-07-16 16:24:02 +00009652static void tg3_hwmon_close(struct tg3 *tp)
9653{
Michael Chanaed93e02012-07-16 16:24:02 +00009654 if (tp->hwmon_dev) {
9655 hwmon_device_unregister(tp->hwmon_dev);
9656 tp->hwmon_dev = NULL;
9657 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9658 }
Michael Chanaed93e02012-07-16 16:24:02 +00009659}
9660
9661static void tg3_hwmon_open(struct tg3 *tp)
9662{
Michael Chanaed93e02012-07-16 16:24:02 +00009663 int i, err;
9664 u32 size = 0;
9665 struct pci_dev *pdev = tp->pdev;
9666 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9667
9668 tg3_sd_scan_scratchpad(tp, ocirs);
9669
9670 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9671 if (!ocirs[i].src_data_length)
9672 continue;
9673
9674 size += ocirs[i].src_hdr_length;
9675 size += ocirs[i].src_data_length;
9676 }
9677
9678 if (!size)
9679 return;
9680
9681 /* Register hwmon sysfs hooks */
9682 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9683 if (err) {
9684 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9685 return;
9686 }
9687
9688 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9689 if (IS_ERR(tp->hwmon_dev)) {
9690 tp->hwmon_dev = NULL;
9691 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9692 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9693 }
Michael Chanaed93e02012-07-16 16:24:02 +00009694}
9695
9696
/* Add the current value of 32-bit statistics register REG into the
 * software-maintained 64-bit counter PSTAT (a high/low u32 pair),
 * carrying into .high when the .low word wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9703
/* Fold the chip's 32-bit MAC/receive-list-placement statistics
 * registers into the 64-bit software counters in tp->hw_stats.
 * Called periodically from the driver timer; skipped while the link
 * is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 workaround: once enough packets have been transmitted,
	 * clear the TX-length RDMA workaround bit that tg3_reset_hw()
	 * set (see the 5719_RDMA_BUG handling there).
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	/* Receive-side counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 and the A0 steppings of 5719/5720, the discard count
	 * is derived from the host-coalescing mbuf low-watermark
	 * attention bit instead of RCVLPC_IN_DISCARDS_CNT.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Write-to-clear the attention bit, then bump
			 * the 64-bit discard counter by one.
			 */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9768
Matt Carlson0e6cf6a2011-06-13 13:38:55 +00009769static void tg3_chk_missed_msi(struct tg3 *tp)
9770{
9771 u32 i;
9772
9773 for (i = 0; i < tp->irq_cnt; i++) {
9774 struct tg3_napi *tnapi = &tp->napi[i];
9775
9776 if (tg3_has_work(tnapi)) {
9777 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9778 tnapi->last_tx_cons == tnapi->tx_cons) {
9779 if (tnapi->chk_msi_cnt < 1) {
9780 tnapi->chk_msi_cnt++;
9781 return;
9782 }
Matt Carlson7f230732011-08-31 11:44:48 +00009783 tg3_msi(0, tnapi);
Matt Carlson0e6cf6a2011-06-13 13:38:55 +00009784 }
9785 }
9786 tnapi->chk_msi_cnt = 0;
9787 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9788 tnapi->last_tx_cons = tnapi->tx_cons;
9789 }
9790}
9791
Linus Torvalds1da177e2005-04-16 15:20:36 -07009792static void tg3_timer(unsigned long __opaque)
9793{
9794 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009795
Matt Carlson5b190622011-11-04 09:15:04 +00009796 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
Michael Chanf475f162006-03-27 23:20:14 -08009797 goto restart_timer;
9798
David S. Millerf47c11e2005-06-24 20:18:35 -07009799 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009800
Matt Carlson0e6cf6a2011-06-13 13:38:55 +00009801 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
Matt Carlson55086ad2011-12-14 11:09:59 +00009802 tg3_flag(tp, 57765_CLASS))
Matt Carlson0e6cf6a2011-06-13 13:38:55 +00009803 tg3_chk_missed_msi(tp);
9804
Joe Perches63c3a662011-04-26 08:12:10 +00009805 if (!tg3_flag(tp, TAGGED_STATUS)) {
David S. Millerfac9b832005-05-18 22:46:34 -07009806 /* All of this garbage is because when using non-tagged
9807 * IRQ status the mailbox/status_block protocol the chip
9808 * uses with the cpu is race prone.
9809 */
Matt Carlson898a56f2009-08-28 14:02:40 +00009810 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
David S. Millerfac9b832005-05-18 22:46:34 -07009811 tw32(GRC_LOCAL_CTRL,
9812 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9813 } else {
9814 tw32(HOSTCC_MODE, tp->coalesce_mode |
Matt Carlsonfd2ce372009-09-01 12:51:13 +00009815 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
David S. Millerfac9b832005-05-18 22:46:34 -07009816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009817
David S. Millerfac9b832005-05-18 22:46:34 -07009818 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
David S. Millerf47c11e2005-06-24 20:18:35 -07009819 spin_unlock(&tp->lock);
Matt Carlsondb219972011-11-04 09:15:03 +00009820 tg3_reset_task_schedule(tp);
Matt Carlson5b190622011-11-04 09:15:04 +00009821 goto restart_timer;
David S. Millerfac9b832005-05-18 22:46:34 -07009822 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009823 }
9824
Linus Torvalds1da177e2005-04-16 15:20:36 -07009825 /* This part only runs once per second. */
9826 if (!--tp->timer_counter) {
Joe Perches63c3a662011-04-26 08:12:10 +00009827 if (tg3_flag(tp, 5705_PLUS))
David S. Millerfac9b832005-05-18 22:46:34 -07009828 tg3_periodic_fetch_stats(tp);
9829
Matt Carlsonb0c59432011-05-19 12:12:48 +00009830 if (tp->setlpicnt && !--tp->setlpicnt)
9831 tg3_phy_eee_enable(tp);
Matt Carlson52b02d02010-10-14 10:37:41 +00009832
Joe Perches63c3a662011-04-26 08:12:10 +00009833 if (tg3_flag(tp, USE_LINKCHG_REG)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009834 u32 mac_stat;
9835 int phy_event;
9836
9837 mac_stat = tr32(MAC_STATUS);
9838
9839 phy_event = 0;
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009840 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009841 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9842 phy_event = 1;
9843 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9844 phy_event = 1;
9845
9846 if (phy_event)
9847 tg3_setup_phy(tp, 0);
Joe Perches63c3a662011-04-26 08:12:10 +00009848 } else if (tg3_flag(tp, POLL_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009849 u32 mac_stat = tr32(MAC_STATUS);
9850 int need_setup = 0;
9851
9852 if (netif_carrier_ok(tp->dev) &&
9853 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9854 need_setup = 1;
9855 }
Matt Carlsonbe98da62010-07-11 09:31:46 +00009856 if (!netif_carrier_ok(tp->dev) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009857 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9858 MAC_STATUS_SIGNAL_DET))) {
9859 need_setup = 1;
9860 }
9861 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07009862 if (!tp->serdes_counter) {
9863 tw32_f(MAC_MODE,
9864 (tp->mac_mode &
9865 ~MAC_MODE_PORT_MODE_MASK));
9866 udelay(40);
9867 tw32_f(MAC_MODE, tp->mac_mode);
9868 udelay(40);
9869 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009870 tg3_setup_phy(tp, 0);
9871 }
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009872 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009873 tg3_flag(tp, 5780_CLASS)) {
Michael Chan747e8f82005-07-25 12:33:22 -07009874 tg3_serdes_parallel_detect(tp);
Matt Carlson57d8b882010-06-05 17:24:35 +00009875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009876
9877 tp->timer_counter = tp->timer_multiplier;
9878 }
9879
Michael Chan130b8e42006-09-27 16:00:40 -07009880 /* Heartbeat is only sent once every 2 seconds.
9881 *
9882 * The heartbeat is to tell the ASF firmware that the host
9883 * driver is still alive. In the event that the OS crashes,
9884 * ASF needs to reset the hardware to free up the FIFO space
9885 * that may be filled with rx packets destined for the host.
9886 * If the FIFO is full, ASF will no longer function properly.
9887 *
9888 * Unintended resets have been reported on real time kernels
9889 * where the timer doesn't run on time. Netpoll will also have
9890 * same problem.
9891 *
9892 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9893 * to check the ring condition when the heartbeat is expiring
9894 * before doing the reset. This will prevent most unintended
9895 * resets.
9896 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009897 if (!--tp->asf_counter) {
Joe Perches63c3a662011-04-26 08:12:10 +00009898 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07009899 tg3_wait_for_event_ack(tp);
9900
Michael Chanbbadf502006-04-06 21:46:34 -07009901 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07009902 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07009903 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Matt Carlsonc6cdf432010-04-05 10:19:26 +00009904 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9905 TG3_FW_UPDATE_TIMEOUT_SEC);
Matt Carlson4ba526c2008-08-15 14:10:04 -07009906
9907 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009908 }
9909 tp->asf_counter = tp->asf_multiplier;
9910 }
9911
David S. Millerf47c11e2005-06-24 20:18:35 -07009912 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009913
Michael Chanf475f162006-03-27 23:20:14 -08009914restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07009915 tp->timer.expires = jiffies + tp->timer_offset;
9916 add_timer(&tp->timer);
9917}
9918
Matt Carlson21f76382012-02-22 12:35:21 +00009919static void __devinit tg3_timer_init(struct tg3 *tp)
9920{
9921 if (tg3_flag(tp, TAGGED_STATUS) &&
9922 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9923 !tg3_flag(tp, 57765_CLASS))
9924 tp->timer_offset = HZ;
9925 else
9926 tp->timer_offset = HZ / 10;
9927
9928 BUG_ON(tp->timer_offset > HZ);
9929
9930 tp->timer_multiplier = (HZ / tp->timer_offset);
9931 tp->asf_multiplier = (HZ / tp->timer_offset) *
9932 TG3_FW_UPDATE_FREQ_SEC;
9933
9934 init_timer(&tp->timer);
9935 tp->timer.data = (unsigned long) tp;
9936 tp->timer.function = tg3_timer;
9937}
9938
9939static void tg3_timer_start(struct tg3 *tp)
9940{
9941 tp->asf_counter = tp->asf_multiplier;
9942 tp->timer_counter = tp->timer_multiplier;
9943
9944 tp->timer.expires = jiffies + tp->timer_offset;
9945 add_timer(&tp->timer);
9946}
9947
/* Cancel the periodic driver timer and wait for a concurrently running
 * tg3_timer() to finish.  Note the timer handler takes tp->lock itself
 * (see tg3_timer), so this must not be called with tp->lock held or it
 * could deadlock.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
9952
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the chip is halted and the
 * interface is closed; the error from tg3_init_hw() is returned.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() cannot run under tp->lock (presumably it may
		 * sleep), so drop and re-take the lock around the teardown —
		 * hence the __releases/__acquires annotations above.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
9976
/* Deferred-work handler that fully resets and re-initializes the chip.
 * Scheduled via tg3_reset_task_schedule() (e.g. from the timer when the
 * write DMA engine appears wedged).  Clears RESET_TASK_PENDING on exit.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device went down while the work was queued — nothing to reset. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Quiesce the PHY and the data path before resetting.  These must
	 * run without tp->lock held.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* A TX recovery was requested: fall back to flushed mailbox writes
	 * before re-initializing (works around write-reordering issues).
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the hardware came back up cleanly. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10020
Matt Carlson4f125f42009-09-01 12:55:02 +000010021static int tg3_request_irq(struct tg3 *tp, int irq_num)
Michael Chanfcfa0a32006-03-20 22:28:41 -080010022{
David Howells7d12e782006-10-05 14:55:46 +010010023 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010024 unsigned long flags;
Matt Carlson4f125f42009-09-01 12:55:02 +000010025 char *name;
10026 struct tg3_napi *tnapi = &tp->napi[irq_num];
10027
10028 if (tp->irq_cnt == 1)
10029 name = tp->dev->name;
10030 else {
10031 name = &tnapi->irq_lbl[0];
10032 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10033 name[IFNAMSIZ-1] = 0;
10034 }
Michael Chanfcfa0a32006-03-20 22:28:41 -080010035
Joe Perches63c3a662011-04-26 08:12:10 +000010036 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
Michael Chanfcfa0a32006-03-20 22:28:41 -080010037 fn = tg3_msi;
Joe Perches63c3a662011-04-26 08:12:10 +000010038 if (tg3_flag(tp, 1SHOT_MSI))
Michael Chanfcfa0a32006-03-20 22:28:41 -080010039 fn = tg3_msi_1shot;
Javier Martinez Canillasab392d22011-03-28 16:27:31 +000010040 flags = 0;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010041 } else {
10042 fn = tg3_interrupt;
Joe Perches63c3a662011-04-26 08:12:10 +000010043 if (tg3_flag(tp, TAGGED_STATUS))
Michael Chanfcfa0a32006-03-20 22:28:41 -080010044 fn = tg3_interrupt_tagged;
Javier Martinez Canillasab392d22011-03-28 16:27:31 +000010045 flags = IRQF_SHARED;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010046 }
Matt Carlson4f125f42009-09-01 12:55:02 +000010047
10048 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
Michael Chanfcfa0a32006-03-20 22:28:41 -080010049}
10050
/* Force a test interrupt on vector 0 and poll for its delivery.
 *
 * Temporarily swaps in tg3_test_isr, coalesces "now" to trigger an
 * interrupt, and polls the mailbox / MISC_HOST_CTRL for up to ~50 ms.
 * The original handler is re-installed before returning.
 *
 * Returns 0 if the interrupt was observed, -EIO if it never arrived,
 * -ENODEV if the device is not running, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so an interrupt fires immediately. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to 5 times, 10 ms apart, for evidence of delivery:
	 * either a non-zero interrupt mailbox or the masked-PCI-INT bit.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack any new status tag so a further interrupt can fire. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal handler for vector 0. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10124
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	/* Fall back to the legacy line interrupt on vector 0. */
	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10185
Matt Carlson9e9fd122009-01-19 16:57:45 -080010186static int tg3_request_firmware(struct tg3 *tp)
10187{
10188 const __be32 *fw_data;
10189
10190 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
Joe Perches05dbe002010-02-17 19:44:19 +000010191 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10192 tp->fw_needed);
Matt Carlson9e9fd122009-01-19 16:57:45 -080010193 return -ENOENT;
10194 }
10195
10196 fw_data = (void *)tp->fw->data;
10197
10198 /* Firmware blob starts with version numbers, followed by
10199 * start address and _full_ length including BSS sections
10200 * (which must be longer than the actual data, of course
10201 */
10202
10203 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10204 if (tp->fw_len < (tp->fw->size - 12)) {
Joe Perches05dbe002010-02-17 19:44:19 +000010205 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10206 tp->fw_len, tp->fw_needed);
Matt Carlson9e9fd122009-01-19 16:57:45 -080010207 release_firmware(tp->fw);
10208 tp->fw = NULL;
10209 return -EINVAL;
10210 }
10211
10212 /* We no longer need firmware; we have it. */
10213 tp->fw_needed = NULL;
10214 return 0;
10215}
10216
Michael Chan91024262012-09-28 07:12:38 +000010217static u32 tg3_irq_count(struct tg3 *tp)
Matt Carlson679563f2009-09-01 12:55:46 +000010218{
Michael Chan91024262012-09-28 07:12:38 +000010219 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
Matt Carlson679563f2009-09-01 12:55:46 +000010220
Michael Chan91024262012-09-28 07:12:38 +000010221 if (irq_cnt > 1) {
Matt Carlsonc3b50032012-01-17 15:27:23 +000010222 /* We want as many rx rings enabled as there are cpus.
10223 * In multiqueue MSI-X mode, the first MSI-X vector
10224 * only deals with link interrupts, etc, so we add
10225 * one to the number of vectors we are requesting.
10226 */
Michael Chan91024262012-09-28 07:12:38 +000010227 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
Matt Carlsonc3b50032012-01-17 15:27:23 +000010228 }
Matt Carlson679563f2009-09-01 12:55:46 +000010229
Michael Chan91024262012-09-28 07:12:38 +000010230 return irq_cnt;
10231}
10232
/* Try to put the device into MSI-X mode.
 *
 * Decides the RX/TX queue counts, requests that many MSI-X vectors
 * (retrying once with however many the PCI core offers), records the
 * resulting vectors in the napi contexts and sets the RSS/TSS flags
 * when multiple vectors are available.
 *
 * Returns true if MSI-X was enabled, false if the caller should fall
 * back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	/* Start from the user-requested queue counts; default the RX
	 * count from the kernel's RSS heuristic, bounded by rxq_max.
	 */
	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors than requested are available: retry with
		 * that count and scale the queue counts down to match.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* A single vector means no multiqueue features to enable. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10293
/* Choose and configure the interrupt mode (MSI-X, MSI, or legacy INTx)
 * and program MSGINT_MODE accordingly.  Falls back to a single-vector
 * INTx configuration when neither MSI variant can be used.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	/* Prefer MSI-X; fall back to plain MSI if it cannot be enabled. */
	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	/* Without MSI-X there is exactly one vector: the PCI line IRQ. */
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	/* One vector implies single-queue operation. */
	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10332
10333static void tg3_ints_fini(struct tg3 *tp)
10334{
Joe Perches63c3a662011-04-26 08:12:10 +000010335 if (tg3_flag(tp, USING_MSIX))
Matt Carlson679563f2009-09-01 12:55:46 +000010336 pci_disable_msix(tp->pdev);
Joe Perches63c3a662011-04-26 08:12:10 +000010337 else if (tg3_flag(tp, USING_MSI))
Matt Carlson679563f2009-09-01 12:55:46 +000010338 pci_disable_msi(tp->pdev);
Joe Perches63c3a662011-04-26 08:12:10 +000010339 tg3_flag_clear(tp, USING_MSI);
10340 tg3_flag_clear(tp, USING_MSIX);
10341 tg3_flag_clear(tp, ENABLE_RSS);
10342 tg3_flag_clear(tp, ENABLE_TSS);
Matt Carlson07b01732009-08-28 14:01:15 +000010343}
10344
/* Bring the device up: interrupts, DMA memory, NAPI, IRQ handlers,
 * hardware init, optional MSI self-test, PHY, hwmon and the TX queues.
 *
 * @reset_phy: passed through to tg3_init_hw().
 * @test_irq:  when true and MSI is in use, run tg3_test_msi() to verify
 *             interrupt delivery before going operational.
 *
 * On failure everything acquired so far is unwound via the err_out*
 * labels (in reverse order of acquisition) and the error is returned.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Install a handler per vector; on failure free the ones already
	 * requested before bailing out.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		/* Pre-57765 MSI chips: enable one-shot MSI at the PCIe
		 * transaction layer.
		 */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10453
/* Bring the device down: the inverse of tg3_start().
 * Cancels deferred work, stops the data path and timer, halts the
 * hardware, then releases IRQs, interrupt vectors, NAPI contexts and
 * DMA memory — in reverse order of acquisition.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	/* Make sure no reset work runs concurrently with the teardown. */
	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10488
/* net_device_ops .ndo_open handler.
 *
 * Loads any needed firmware (firmware failure is fatal only for
 * 5701 A0; otherwise it just disables TSO), powers the chip up and
 * starts it via tg3_start().  On start failure the auxiliary power
 * state is fixed up and the device is put back into D3hot.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips: degrade gracefully by turning TSO off. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}
	return err;
}
10528
/* net_device_ops .ndo_stop handler: stop the device, reset the saved
 * statistics baselines and power the chip down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
10545
10546static inline u64 get_stat64(tg3_stat64_t *val)
10547{
10548 return ((u64)val->high << 32) | ((u64)val->low);
10549}
10550
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper PHYs the count is read from the PHY's RXR
 * counter register (enabling the CRC-count test bit first) and
 * accumulated in tp->phy_crc_errors; the PHY counter presumably
 * clears on read — TODO confirm against the PHY datasheet.  All
 * other configurations use the MAC's rx_fcs_errors statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10574
/* Fold one hardware counter into the cumulative ethtool statistics:
 * saved value from before the last reset plus the live hardware count.
 * Relies on the 'estats', 'old_estats' and 'hw_stats' locals of the
 * enclosing function.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Populate @estats with cumulative ethtool statistics: each field is
 * the pre-reset baseline (tp->estats_prev) plus the current hardware
 * statistics block value.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA / internal queue counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Interrupt / ring bookkeeping counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
10662
/* Fill the standard netdev rtnl_link_stats64 structure.
 *
 * As with tg3_get_estats(), each value is the snapshot saved before
 * the last chip reset (tp->net_stats_prev) plus the live hardware
 * statistics block counter; several netdev fields are derived from
 * sums of multiple hardware counters.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* rx/tx packet totals are the sum of the per-cast-type counters */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * tg3_calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counts (not in the hw stats block) */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
10718
/* ethtool get_regs_len: size in bytes of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10723
/* ethtool get_regs: dump the legacy register block into '_p'.
 *
 * The buffer is zeroed first so that the low-power early-return still
 * hands userspace defined contents.  Register reads are done under the
 * full lock to keep the dump consistent with other hardware accesses.
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Don't touch the chip while it is powered down */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10742
10743static int tg3_get_eeprom_len(struct net_device *dev)
10744{
10745 struct tg3 *tp = netdev_priv(dev);
10746
10747 return tp->nvram_size;
10748}
10749
/* ethtool get_eeprom: copy 'eeprom->len' bytes of NVRAM starting at
 * 'eeprom->offset' into 'data'.
 *
 * NVRAM can only be read in aligned 4-byte words, so the transfer is
 * split into an unaligned head, a run of whole words, and an unaligned
 * tail.  eeprom->len is advanced as bytes are delivered so that a
 * partial count survives an early error return.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is inaccessible while the chip is powered down */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* report how many bytes were delivered */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10812
/* ethtool set_eeprom: write 'eeprom->len' bytes from 'data' to NVRAM
 * at 'eeprom->offset'.
 *
 * NVRAM writes must be whole aligned 4-byte words, so for unaligned
 * requests the surrounding words are read first and a temporary
 * word-aligned buffer is assembled (read-modify-write).
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	/* NVRAM is inaccessible while the chip is powered down */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * preserve the bytes before 'offset' in the first word
		 */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * preserve the bytes after the request in the last word
		 */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* assemble a word-aligned image: saved head word, user
		 * data, saved tail word
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10871
/* ethtool get_settings: report link capabilities, advertisement and
 * current state.
 *
 * When the device is driven through phylib the query is delegated to
 * the attached phy_device; otherwise the answer is assembled from the
 * driver's own link_config and phy_flags.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper vs fibre determines port type and 10/100 support */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Translate the flow-control configuration into the ethtool
	 * Pause/Asym_Pause advertisement bits
	 */
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		/* Link is up: report the negotiated state */
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010937
/* ethtool set_settings: validate and apply a new link configuration.
 *
 * Phylib-driven devices delegate to phy_ethtool_sset().  Otherwise the
 * request is validated against the hardware's capabilities (serdes
 * vs copper, 10/100-only) before link_config is updated under the full
 * lock and the PHY reprogrammed if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced configuration must name a valid duplex */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of advertisement bits this device can
		 * accept, then reject any request outside it
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits in what we store */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Fibre links only support forced 1000/full */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011024
Linus Torvalds1da177e2005-04-16 15:20:36 -070011025static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11026{
11027 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011028
Rick Jones68aad782011-11-07 13:29:27 +000011029 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11030 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11031 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11032 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011033}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011034
Linus Torvalds1da177e2005-04-16 15:20:36 -070011035static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11036{
11037 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011038
Joe Perches63c3a662011-04-26 08:12:10 +000011039 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -070011040 wol->supported = WAKE_MAGIC;
11041 else
11042 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011043 wol->wolopts = 0;
Joe Perches63c3a662011-04-26 08:12:10 +000011044 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011045 wol->wolopts = WAKE_MAGIC;
11046 memset(&wol->sopass, 0, sizeof(wol->sopass));
11047}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011048
Linus Torvalds1da177e2005-04-16 15:20:36 -070011049static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11050{
11051 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011052 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011053
Linus Torvalds1da177e2005-04-16 15:20:36 -070011054 if (wol->wolopts & ~WAKE_MAGIC)
11055 return -EINVAL;
11056 if ((wol->wolopts & WAKE_MAGIC) &&
Joe Perches63c3a662011-04-26 08:12:10 +000011057 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011058 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011059
Rafael J. Wysockif2dc0d12010-10-25 13:01:55 +000011060 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11061
David S. Millerf47c11e2005-06-24 20:18:35 -070011062 spin_lock_bh(&tp->lock);
Rafael J. Wysockif2dc0d12010-10-25 13:01:55 +000011063 if (device_may_wakeup(dp))
Joe Perches63c3a662011-04-26 08:12:10 +000011064 tg3_flag_set(tp, WOL_ENABLE);
Rafael J. Wysockif2dc0d12010-10-25 13:01:55 +000011065 else
Joe Perches63c3a662011-04-26 08:12:10 +000011066 tg3_flag_clear(tp, WOL_ENABLE);
David S. Millerf47c11e2005-06-24 20:18:35 -070011067 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011068
Linus Torvalds1da177e2005-04-16 15:20:36 -070011069 return 0;
11070}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011071
Linus Torvalds1da177e2005-04-16 15:20:36 -070011072static u32 tg3_get_msglevel(struct net_device *dev)
11073{
11074 struct tg3 *tp = netdev_priv(dev);
11075 return tp->msg_enable;
11076}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011077
Linus Torvalds1da177e2005-04-16 15:20:36 -070011078static void tg3_set_msglevel(struct net_device *dev, u32 value)
11079{
11080 struct tg3 *tp = netdev_priv(dev);
11081 tp->msg_enable = value;
11082}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011083
/* ethtool nway_reset: restart link autonegotiation.
 *
 * Fails if the interface is down or uses an on-chip serdes PHY.
 * Phylib devices delegate to phy_start_aneg(); otherwise BMCR is
 * manipulated directly under tp->lock.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): the first read's result is discarded --
		 * appears to be a deliberate dummy read to flush a stale
		 * latched MII value before the real read below; confirm
		 * before "simplifying" it away.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011117
Linus Torvalds1da177e2005-04-16 15:20:36 -070011118static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11119{
11120 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011121
Matt Carlson2c49a442010-09-30 10:34:35 +000011122 ering->rx_max_pending = tp->rx_std_ring_mask;
Joe Perches63c3a662011-04-26 08:12:10 +000011123 if (tg3_flag(tp, JUMBO_RING_ENABLE))
Matt Carlson2c49a442010-09-30 10:34:35 +000011124 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
Michael Chan4f81c322006-03-20 21:33:42 -080011125 else
11126 ering->rx_jumbo_max_pending = 0;
11127
11128 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011129
11130 ering->rx_pending = tp->rx_pending;
Joe Perches63c3a662011-04-26 08:12:10 +000011131 if (tg3_flag(tp, JUMBO_RING_ENABLE))
Michael Chan4f81c322006-03-20 21:33:42 -080011132 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11133 else
11134 ering->rx_jumbo_pending = 0;
11135
Matt Carlsonf3f3f272009-08-28 14:03:21 +000011136 ering->tx_pending = tp->napi[0].tx_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011137}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011138
/* ethtool set_ringparam: resize the rx/tx rings.
 *
 * If the interface is running the PHY and data path are stopped, the
 * new sizes recorded, and the hardware halted and restarted so the
 * rings are re-allocated at the new sizes.  The tx ring must hold more
 * than MAX_SKB_FRAGS descriptors (tripled on TSO_BUG chips) so a
 * maximally-fragmented packet can always be posted.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard rx descriptors */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All tx queues share the same ring size */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011184
Linus Torvalds1da177e2005-04-16 15:20:36 -070011185static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11186{
11187 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011188
Joe Perches63c3a662011-04-26 08:12:10 +000011189 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
Matt Carlson8d018622007-12-20 20:05:44 -080011190
Matt Carlson4a2db502011-12-08 14:40:17 +000011191 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
Matt Carlson8d018622007-12-20 20:05:44 -080011192 epause->rx_pause = 1;
11193 else
11194 epause->rx_pause = 0;
11195
Matt Carlson4a2db502011-12-08 14:40:17 +000011196 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
Matt Carlson8d018622007-12-20 20:05:44 -080011197 epause->tx_pause = 1;
11198 else
11199 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011200}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011201
/* ethtool set_pauseparam: configure rx/tx flow control.
 *
 * Phylib path: translate the request into Pause/Asym_Pause
 * advertisement bits, and if autonegotiation is active renegotiate so
 * the link partner learns the new settings; a forced configuration is
 * applied immediately.  Legacy path: update the driver flags and, if
 * the interface is running, halt and restart the hardware to apply.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric pause requires PHY support for it */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx request onto the ethtool advertisement
		 * encoding (Pause / Pause|Asym / Asym / none)
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced. Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: just record the desired
			 * advertisement for when it connects
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011303
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011304static int tg3_get_sset_count(struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011305{
Jeff Garzikb9f2c042007-10-03 18:07:32 -070011306 switch (sset) {
11307 case ETH_SS_TEST:
11308 return TG3_NUM_TEST;
11309 case ETH_SS_STATS:
11310 return TG3_NUM_STATS;
11311 default:
11312 return -EOPNOTSUPP;
11313 }
Michael Chan4cafd3f2005-05-29 14:56:34 -070011314}
11315
Matt Carlson90415472011-12-16 13:33:23 +000011316static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11317 u32 *rules __always_unused)
11318{
11319 struct tg3 *tp = netdev_priv(dev);
11320
11321 if (!tg3_flag(tp, SUPPORT_MSIX))
11322 return -EOPNOTSUPP;
11323
11324 switch (info->cmd) {
11325 case ETHTOOL_GRXRINGS:
11326 if (netif_running(tp->dev))
Michael Chan91024262012-09-28 07:12:38 +000011327 info->data = tp->rxq_cnt;
Matt Carlson90415472011-12-16 13:33:23 +000011328 else {
11329 info->data = num_online_cpus();
Michael Chan91024262012-09-28 07:12:38 +000011330 if (info->data > TG3_RSS_MAX_NUM_QS)
11331 info->data = TG3_RSS_MAX_NUM_QS;
Matt Carlson90415472011-12-16 13:33:23 +000011332 }
11333
11334 /* The first interrupt vector only
11335 * handles link interrupts.
11336 */
11337 info->data -= 1;
11338 return 0;
11339
11340 default:
11341 return -EOPNOTSUPP;
11342 }
11343}
11344
11345static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11346{
11347 u32 size = 0;
11348 struct tg3 *tp = netdev_priv(dev);
11349
11350 if (tg3_flag(tp, SUPPORT_MSIX))
11351 size = TG3_RSS_INDIR_TBL_SIZE;
11352
11353 return size;
11354}
11355
11356static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11357{
11358 struct tg3 *tp = netdev_priv(dev);
11359 int i;
11360
11361 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11362 indir[i] = tp->rss_ind_tbl[i];
11363
11364 return 0;
11365}
11366
11367static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11368{
11369 struct tg3 *tp = netdev_priv(dev);
11370 size_t i;
11371
11372 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11373 tp->rss_ind_tbl[i] = indir[i];
11374
11375 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11376 return 0;
11377
11378 /* It is legal to write the indirection
11379 * table while the device is running.
11380 */
11381 tg3_full_lock(tp, 0);
11382 tg3_rss_write_indir_tbl(tp);
11383 tg3_full_unlock(tp);
11384
11385 return 0;
11386}
11387
Michael Chan09681692012-09-28 07:12:42 +000011388static void tg3_get_channels(struct net_device *dev,
11389 struct ethtool_channels *channel)
11390{
11391 struct tg3 *tp = netdev_priv(dev);
11392 u32 deflt_qs = netif_get_num_default_rss_queues();
11393
11394 channel->max_rx = tp->rxq_max;
11395 channel->max_tx = tp->txq_max;
11396
11397 if (netif_running(dev)) {
11398 channel->rx_count = tp->rxq_cnt;
11399 channel->tx_count = tp->txq_cnt;
11400 } else {
11401 if (tp->rxq_req)
11402 channel->rx_count = tp->rxq_req;
11403 else
11404 channel->rx_count = min(deflt_qs, tp->rxq_max);
11405
11406 if (tp->txq_req)
11407 channel->tx_count = tp->txq_req;
11408 else
11409 channel->tx_count = min(deflt_qs, tp->txq_max);
11410 }
11411}
11412
11413static int tg3_set_channels(struct net_device *dev,
11414 struct ethtool_channels *channel)
11415{
11416 struct tg3 *tp = netdev_priv(dev);
11417
11418 if (!tg3_flag(tp, SUPPORT_MSIX))
11419 return -EOPNOTSUPP;
11420
11421 if (channel->rx_count > tp->rxq_max ||
11422 channel->tx_count > tp->txq_max)
11423 return -EINVAL;
11424
11425 tp->rxq_req = channel->rx_count;
11426 tp->txq_req = channel->tx_count;
11427
11428 if (!netif_running(dev))
11429 return 0;
11430
11431 tg3_stop(tp);
11432
11433 netif_carrier_off(dev);
11434
11435 tg3_start(tp, true, false);
11436
11437 return 0;
11438}
11439
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011440static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011441{
11442 switch (stringset) {
11443 case ETH_SS_STATS:
11444 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11445 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -070011446 case ETH_SS_TEST:
11447 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11448 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011449 default:
11450 WARN_ON(1); /* we need a WARN() */
11451 break;
11452 }
11453}
11454
stephen hemminger81b87092011-04-04 08:43:50 +000011455static int tg3_set_phys_id(struct net_device *dev,
11456 enum ethtool_phys_id_state state)
Michael Chan4009a932005-09-05 17:52:54 -070011457{
11458 struct tg3 *tp = netdev_priv(dev);
Michael Chan4009a932005-09-05 17:52:54 -070011459
11460 if (!netif_running(tp->dev))
11461 return -EAGAIN;
11462
stephen hemminger81b87092011-04-04 08:43:50 +000011463 switch (state) {
11464 case ETHTOOL_ID_ACTIVE:
Allan, Bruce Wfce55922011-04-13 13:09:10 +000011465 return 1; /* cycle on/off once per second */
Michael Chan4009a932005-09-05 17:52:54 -070011466
stephen hemminger81b87092011-04-04 08:43:50 +000011467 case ETHTOOL_ID_ON:
11468 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11469 LED_CTRL_1000MBPS_ON |
11470 LED_CTRL_100MBPS_ON |
11471 LED_CTRL_10MBPS_ON |
11472 LED_CTRL_TRAFFIC_OVERRIDE |
11473 LED_CTRL_TRAFFIC_BLINK |
11474 LED_CTRL_TRAFFIC_LED);
11475 break;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011476
stephen hemminger81b87092011-04-04 08:43:50 +000011477 case ETHTOOL_ID_OFF:
11478 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11479 LED_CTRL_TRAFFIC_OVERRIDE);
11480 break;
Michael Chan4009a932005-09-05 17:52:54 -070011481
stephen hemminger81b87092011-04-04 08:43:50 +000011482 case ETHTOOL_ID_INACTIVE:
11483 tw32(MAC_LED_CTRL, tp->led_ctrl);
11484 break;
Michael Chan4009a932005-09-05 17:52:54 -070011485 }
stephen hemminger81b87092011-04-04 08:43:50 +000011486
Michael Chan4009a932005-09-05 17:52:54 -070011487 return 0;
11488}
11489
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011490static void tg3_get_ethtool_stats(struct net_device *dev,
Linus Torvalds1da177e2005-04-16 15:20:36 -070011491 struct ethtool_stats *estats, u64 *tmp_stats)
11492{
11493 struct tg3 *tp = netdev_priv(dev);
Matt Carlson0e6c9da2011-12-08 14:40:13 +000011494
Matt Carlsonb546e462012-02-13 15:20:09 +000011495 if (tp->hw_stats)
11496 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11497 else
11498 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011499}
11500
/* Read the device's entire Vital Product Data (VPD) block.
 *
 * For parts with a standard EEPROM image, an extended-VPD directory
 * entry in NVRAM is preferred; failing that, the fixed legacy window
 * at TG3_NVM_VPD_OFF is used.  Parts without a usable NVRAM image are
 * read through the PCI VPD capability (pci_read_vpd()).
 *
 * Returns a kmalloc()ed buffer on success (caller must kfree()) with
 * the length stored in *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	/* No NVRAM, or signature word unreadable: nothing to do. */
	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length field is in 32-bit words;
			 * the data pointer is in the following word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended VPD located: fall back to the fixed window. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read via the PCI VPD capability.  pci_read_vpd() may
		 * return short; allow up to three attempts, treating
		 * -ETIMEDOUT/-EINTR as a zero-length read and retrying.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11576
/* Sizes (in bytes) of the NVRAM image formats that tg3_test_nvram()
 * knows how to validate.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test: verify the integrity of the NVRAM contents.
 *
 * Three image flavors are recognized by their magic word:
 *   - standard EEPROM images: CRC-checked bootstrap and manufacturing
 *     blocks, then the VPD read-only area checksum;
 *   - firmware self-boot images: whole-image byte checksum must be 0;
 *   - hardware self-boot images: per-byte odd-parity check.
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	/* Nothing to test without NVRAM. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image format
	 * encoded in the magic word.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				/* Unknown self-boot revision. */
				return -EIO;
			}
		} else
			/* Other self-boot formats are not checked. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the whole image into memory, preserving NVRAM byte
	 * order (big-endian reads).
	 */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero (mod 256). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.
		 * Bytes 0 and 8 each carry 7 parity bits; bytes 16-17
		 * carry the remaining 14 (6 + 8), giving one parity
		 * bit per data byte (28 = NVRAM_SELFBOOT_DATA_SIZE).
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have odd
		 * parity overall.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Standard EEPROM image from here on. */
	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	/* Re-use buf for the VPD block and verify its RO-area
	 * checksum, if present.
	 */
	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		/* RO section must fit inside the VPD block. */
		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Sum from the start of VPD through the RV
			 * checksum byte; must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11765
Michael Chanca430072005-05-29 14:57:23 -070011766#define TG3_SERDES_TIMEOUT_SEC 2
11767#define TG3_COPPER_TIMEOUT_SEC 6
11768
11769static int tg3_test_link(struct tg3 *tp)
11770{
11771 int i, max;
11772
11773 if (!netif_running(tp->dev))
11774 return -ENODEV;
11775
Matt Carlsonf07e9af2010-08-02 11:26:07 +000011776 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -070011777 max = TG3_SERDES_TIMEOUT_SEC;
11778 else
11779 max = TG3_COPPER_TIMEOUT_SEC;
11780
11781 for (i = 0; i < max; i++) {
11782 if (netif_carrier_ok(tp->dev))
11783 return 0;
11784
11785 if (msleep_interruptible(1000))
11786 break;
11787 }
11788
11789 return -EIO;
11790}
11791
/* Only test the commonly used registers */
/* ethtool self-test: exercise a table of MAC/RX/host-coalescing/
 * buffer-manager/mailbox registers.  Each table entry carries the
 * register offset, chip-applicability flags, a mask of read-only
 * bits, and a mask of read/write bits.  For each applicable register
 * the test writes all-zeros and then all-ones and checks that the
 * read-only bits never change while the read/write bits track the
 * written value.  The original register content is restored.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so the applicability flags can be checked. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
12012
Michael Chan7942e1d2005-05-29 14:58:36 -070012013static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12014{
Arjan van de Venf71e1302006-03-03 21:33:57 -050012015 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -070012016 int i;
12017 u32 j;
12018
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +020012019 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -070012020 for (j = 0; j < len; j += 4) {
12021 u32 val;
12022
12023 tg3_write_mem(tp, offset + j, test_pattern[i]);
12024 tg3_read_mem(tp, offset + j, &val);
12025 if (val != test_pattern[i])
12026 return -EIO;
12027 }
12028 }
12029 return 0;
12030}
12031
/* ethtool self-test: pattern-test the on-chip memory regions.
 *
 * The region table ({offset, len} pairs, terminated by an offset of
 * 0xffffffff) is chosen per ASIC family, then each region is handed
 * to tg3_do_mem_test().  Returns 0 on success or the first region's
 * error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table for this chip family; the order of
	 * these checks matters (most specific families first).
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12100
/* MSS used for the fabricated TSO loopback test frame. */
#define TG3_TSO_MSS		500

/* Header component lengths (bytes) within tg3_tso_header below. */
#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned header template for the TSO loopback self-test frame:
 * a 2-byte Ethertype (0x0800 = IPv4) followed by a 20-byte IPv4
 * header and a TCP header with 12 bytes of options.  Zeroed fields
 * (e.g. IP total length and checksums) are presumably filled in at
 * run time by the loopback test — see tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
Michael Chan9f40dea2005-09-05 17:53:06 -070012123
/* Push one test frame through the currently configured loopback path.
 *
 * Builds a @pktsz-byte pattern frame (or a TSO super-frame built from
 * tg3_tso_header when @tso_loopback), posts it on the transmit ring,
 * then polls the status block until the frame has been transmitted and
 * received back, and finally compares the received data byte-for-byte
 * against the transmitted pattern.
 *
 * The caller must already have placed the MAC/PHY into loopback mode
 * (see tg3_test_loopback).
 *
 * Returns 0 if the frame came back intact, -ENOMEM if the skb could not
 * be allocated, and -EIO on any mapping, transmit, receive, or
 * data-compare failure.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS enabled, the test traffic flows through napi[1]
	 * for the respective direction; otherwise everything is napi[0].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Frame starts with our own MAC address so the loopback frame
	 * passes the receive address filter; 8 zero bytes follow.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Payload bytes after the template determine how many
		 * MSS-sized segments the chip will emit.
		 */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO engines want a zeroed TCP checksum. */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Each HW TSO generation encodes the header length into
		 * the mss/base_flags descriptor fields differently.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a deterministic byte pattern that the
	 * receive side can verify.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the rx producer so we can tell when our frame(s)
	 * arrive.
	 */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every receive completion our frame produced and verify
	 * ring, error bits, length, and payload contents.  val tracks
	 * the expected pattern position across segments.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* The frame must have landed on the ring that
			 * matches its size class.
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
12331
/* Per-test failure bits reported in the ethtool self-test data[] slots
 * filled by tg3_test_loopback().
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
Matt Carlson00c266b2011-04-25 12:42:46 +000012339
/* Run the MAC, internal-PHY, and (when @do_extlpbk) external loopback
 * tests.
 *
 * Failure bits (TG3_*_LOOPBACK_FAILED) accumulate in data[0] for MAC
 * loopback, data[1] for internal PHY loopback, and data[2] for external
 * loopback.  The caller has already zeroed data[] (tg3_self_test does a
 * memset), so reading data[2] in the final OR is safe even when
 * external loopback is skipped.
 *
 * Returns 0 if all requested loopback modes passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* EEE interferes with loopback; mask the capability for the
	 * duration of the test and restore it at 'done'.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata. Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback is only attempted when we drive the PHY directly
	 * (no SERDES, no phylib).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec. Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
12450
/* ethtool .self_test handler.
 *
 * data[] slots: 0 = NVRAM, 1 = link, 2 = registers, 3 = memory,
 * 4-6 = loopback (MAC / internal PHY / external), 7 = interrupt.
 * Offline tests halt the chip, run with the device quiesced, then
 * restart the hardware.  Any failure also sets ETH_TEST_FL_FAILED in
 * etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* A chip in low-power state must be powered up first; if that
	 * fails, mark every test failed and bail.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* External loopback forces the link down, so skip the link test
	 * in that case.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * (5705+ parts have no separate TX CPU).
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test must run without tp->lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12538
/* ndo_do_ioctl handler for MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).
 *
 * When phylib manages the PHY, the request is delegated wholesale to
 * phy_mii_ioctl().  Otherwise PHY register reads/writes are performed
 * under tp->lock.  SERDES parts have no MII PHY, so register access
 * falls through to -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
12595
/* ethtool .get_coalesce handler: report the interrupt-coalescing
 * parameters cached in tp->coal (programmed by tg3_set_coalesce).
 */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
12603
Michael Chand244c892005-07-05 14:42:33 -070012604static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12605{
12606 struct tg3 *tp = netdev_priv(dev);
12607 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12608 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12609
Joe Perches63c3a662011-04-26 08:12:10 +000012610 if (!tg3_flag(tp, 5705_PLUS)) {
Michael Chand244c892005-07-05 14:42:33 -070012611 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12612 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12613 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12614 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12615 }
12616
12617 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12618 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12619 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12620 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12621 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12622 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12623 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12624 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12625 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12626 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12627 return -EINVAL;
12628
12629 /* No rx interrupts will be generated if both are zero */
12630 if ((ec->rx_coalesce_usecs == 0) &&
12631 (ec->rx_max_coalesced_frames == 0))
12632 return -EINVAL;
12633
12634 /* No tx interrupts will be generated if both are zero */
12635 if ((ec->tx_coalesce_usecs == 0) &&
12636 (ec->tx_max_coalesced_frames == 0))
12637 return -EINVAL;
12638
12639 /* Only copy relevant parameters, ignore all others. */
12640 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12641 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12642 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12643 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12644 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12645 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12646 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12647 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12648 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12649
12650 if (netif_running(dev)) {
12651 tg3_full_lock(tp, 0);
12652 __tg3_set_coalesce(tp, &tp->coal);
12653 tg3_full_unlock(tp);
12654 }
12655 return 0;
12656}
12657
/* ethtool operations table for the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= ethtool_op_get_ts_info,
};
12692
David S. Millerb4017c52012-03-01 17:57:40 -050012693static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12694 struct rtnl_link_stats64 *stats)
12695{
12696 struct tg3 *tp = netdev_priv(dev);
12697
David S. Millerb4017c52012-03-01 17:57:40 -050012698 spin_lock_bh(&tp->lock);
Michael Chan0f566b22012-07-29 19:15:44 +000012699 if (!tp->hw_stats) {
12700 spin_unlock_bh(&tp->lock);
12701 return &tp->net_stats_prev;
12702 }
12703
David S. Millerb4017c52012-03-01 17:57:40 -050012704 tg3_get_nstats(tp, stats);
12705 spin_unlock_bh(&tp->lock);
12706
12707 return stats;
12708}
12709
/* ndo_set_rx_mode handler: reprogram the receive filters under the full
 * driver lock; a no-op while the interface is down (the filters are
 * programmed at open time).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
12721
Matt Carlsonfaf16272012-02-13 10:20:07 +000012722static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12723 int new_mtu)
12724{
12725 dev->mtu = new_mtu;
12726
12727 if (new_mtu > ETH_DATA_LEN) {
12728 if (tg3_flag(tp, 5780_CLASS)) {
12729 netdev_update_features(dev);
12730 tg3_flag_clear(tp, TSO_CAPABLE);
12731 } else {
12732 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12733 }
12734 } else {
12735 if (tg3_flag(tp, 5780_CLASS)) {
12736 tg3_flag_set(tp, TSO_CAPABLE);
12737 netdev_update_features(dev);
12738 }
12739 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12740 }
12741}
12742
/* ndo_change_mtu handler.
 *
 * Validates the requested MTU, then — if the interface is up — stops
 * traffic, halts the chip, applies the new MTU via tg3_set_mtu(), and
 * restarts the hardware.  If the interface is down, only the bookkeeping
 * is updated; the hardware picks it up on the next open.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
12787
/* Network device operations table for the tg3 driver. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12805
Linus Torvalds1da177e2005-04-16 15:20:36 -070012806static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12807{
Michael Chan1b277772006-03-20 22:27:48 -080012808 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012809
12810 tp->nvram_size = EEPROM_CHIP_SIZE;
12811
Matt Carlsone4f34112009-02-25 14:25:00 +000012812 if (tg3_nvram_read(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012813 return;
12814
Michael Chanb16250e2006-09-27 16:10:14 -070012815 if ((magic != TG3_EEPROM_MAGIC) &&
12816 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12817 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012818 return;
12819
12820 /*
12821 * Size the chip by reading offsets at increasing powers of two.
12822 * When we encounter our validation signature, we know the addressing
12823 * has wrapped around, and thus have our chip size.
12824 */
Michael Chan1b277772006-03-20 22:27:48 -080012825 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012826
12827 while (cursize < tp->nvram_size) {
Matt Carlsone4f34112009-02-25 14:25:00 +000012828 if (tg3_nvram_read(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012829 return;
12830
Michael Chan18201802006-03-20 22:29:15 -080012831 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012832 break;
12833
12834 cursize <<= 1;
12835 }
12836
12837 tp->nvram_size = cursize;
12838}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012839
/* Determine total NVRAM size.  Selfboot images (non-standard magic) fall
 * back to the EEPROM probing routine; otherwise the size word stored at
 * offset 0xf0 is used, defaulting to 512KB when that word is zero.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	/* Nothing to size if there is no NVRAM or offset 0 is unreadable. */
	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	/* No usable size word; assume the 512KB default. */
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
12872
/* Decode NVRAM_CFG1 for pre-5752 devices: record whether a flash
 * interface is present and, for 5750/5780-class chips, translate the
 * vendor strapping bits into JEDEC vendor, page size and buffering.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: drop compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Other chips default to a buffered Atmel part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12923
Matt Carlsona1b950d2009-09-01 13:20:17 +000012924static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12925{
12926 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12927 case FLASH_5752PAGE_SIZE_256:
12928 tp->nvram_pagesize = 256;
12929 break;
12930 case FLASH_5752PAGE_SIZE_512:
12931 tp->nvram_pagesize = 512;
12932 break;
12933 case FLASH_5752PAGE_SIZE_1K:
12934 tp->nvram_pagesize = 1024;
12935 break;
12936 case FLASH_5752PAGE_SIZE_2K:
12937 tp->nvram_pagesize = 2048;
12938 break;
12939 case FLASH_5752PAGE_SIZE_4K:
12940 tp->nvram_pagesize = 4096;
12941 break;
12942 case FLASH_5752PAGE_SIZE_264:
12943 tp->nvram_pagesize = 264;
12944 break;
12945 case FLASH_5752PAGE_SIZE_528:
12946 tp->nvram_pagesize = 528;
12947 break;
12948 }
12949}
12950
/* Decode the NVRAM_CFG1 vendor strapping for 5752 devices and record
 * JEDEC vendor, buffering, flash presence and page size in *tp.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12991
/* Decode NVRAM_CFG1 for 5755 devices.  Besides vendor/buffering/page
 * size, the usable NVRAM size depends on whether TPM write protection
 * is strapped on (protect != 0 shrinks the reported size).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13047
/* Decode the NVRAM_CFG1 vendor strapping for 5787 devices and record
 * JEDEC vendor, buffering, flash presence and page size in *tp.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* EEPROM: use the maximum chip size as the page size. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13085
/* Decode NVRAM_CFG1 for 5761 devices.  When TPM write protection is
 * strapped on, the usable size comes from NVRAM_ADDR_LOCKOUT; otherwise
 * it is inferred from the part number in the vendor strapping.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected parts report their usable size directly. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13160
Michael Chanb5d37722006-09-27 16:06:21 -070013161static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13162{
13163 tp->nvram_jedecnum = JEDEC_ATMEL;
Joe Perches63c3a662011-04-26 08:12:10 +000013164 tg3_flag_set(tp, NVRAM_BUFFERED);
Michael Chanb5d37722006-09-27 16:06:21 -070013165 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13166}
13167
/* Decode NVRAM_CFG1 for 57780-family devices.  Unknown strapping means
 * the board has no NVRAM at all (NO_NVRAM flag).
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only works for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13239
13240
/* Decode NVRAM_CFG1 for 5717/5719 devices.  Some straps leave
 * tp->nvram_size unset here so it can be detected later; unknown
 * strapping means no NVRAM is present.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only works for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13318
/* Decode NVRAM_CFG1 for 5720 devices: the pin-strap field selects
 * between plain EEPROMs, Atmel flash and ST flash parts of various
 * sizes; unknown strapping means no NVRAM is present.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* HD (high-density) vs LD EEPROM differ only in size. */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only works for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13430
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset and enable NVRAM access, then dispatch to the per-ASIC decode
 * routine to fill in vendor, page size and size fields in *tp.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM state machine and program the clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM is shared with firmware; serialize access. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Decode routines may leave this 0 to request probing. */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: plain EEPROM, no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
13496
/* One entry of the PCI subsystem ID -> PHY ID lookup table. */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem vendor/device IDs */
	u32 phy_id;				/* matching TG3_PHY_ID_* value, or 0 */
};
13501
/* Board-specific PHY ID table, keyed by PCI subsystem vendor/device ID.
 * NOTE(review): a phy_id of 0 appears to mean "no fixed PHY ID for this
 * board" (e.g. SX/fiber variants) -- confirm against the lookup caller.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13565
Matt Carlson24daf2b2010-02-17 15:17:02 +000013566static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013567{
13568 int i;
13569
13570 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13571 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13572 tp->pdev->subsystem_vendor) &&
13573 (subsys_id_to_phy_id[i].subsys_devid ==
13574 tp->pdev->subsystem_device))
13575 return &subsys_id_to_phy_id[i];
13576 }
13577 return NULL;
13578}
13579
/* Read the hardware configuration left in NIC SRAM by the bootcode
 * (NIC_SRAM_DATA_*) and translate it into driver state: PHY ID, LED
 * mode, WOL/ASF/APE capabilities and assorted workaround flags.
 * 5906 parts have no SRAM config block and are handled via the VCPU
 * shadow config register instead.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	/* Defaults used when no valid SRAM signature is found below. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: config comes from the VCPU shadow register,
		 * not NIC SRAM.  LOM (LAN-on-motherboard) detection is
		 * via the PCIE transaction config register.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists for bootcode versions 0 < ver < 0x100
		 * on chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two packed SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards use LED mode PHY_2 regardless. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards are writable despite the
			 * write-protect bit being set.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards without fiber WOL support cannot wake. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Propagate the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13779
Matt Carlsonb2a5c192008-04-03 21:44:44 -070013780static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13781{
13782 int i;
13783 u32 val;
13784
13785 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13786 tw32(OTP_CTRL, cmd);
13787
13788 /* Wait for up to 1 ms for command to execute. */
13789 for (i = 0; i < 100; i++) {
13790 val = tr32(OTP_STATUS);
13791 if (val & OTP_STATUS_CMD_DONE)
13792 break;
13793 udelay(10);
13794 }
13795
13796 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13797}
13798
13799/* Read the gphy configuration from the OTP region of the chip. The gphy
13800 * configuration is a 32-bit value that straddles the alignment boundary.
13801 * We do two 32-bit reads and then shift and merge the results.
13802 */
13803static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13804{
13805 u32 bhalf_otp, thalf_otp;
13806
13807 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13808
13809 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13810 return 0;
13811
13812 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13813
13814 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13815 return 0;
13816
13817 thalf_otp = tr32(OTP_READ_DATA);
13818
13819 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13820
13821 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13822 return 0;
13823
13824 bhalf_otp = tr32(OTP_READ_DATA);
13825
13826 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13827}
13828
Matt Carlsone256f8a2011-03-09 16:58:24 +000013829static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13830{
Hiroaki SHIMODA202ff1c2011-11-22 04:05:41 +000013831 u32 adv = ADVERTISED_Autoneg;
Matt Carlsone256f8a2011-03-09 16:58:24 +000013832
13833 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13834 adv |= ADVERTISED_1000baseT_Half |
13835 ADVERTISED_1000baseT_Full;
13836
13837 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13838 adv |= ADVERTISED_100baseT_Half |
13839 ADVERTISED_100baseT_Full |
13840 ADVERTISED_10baseT_Half |
13841 ADVERTISED_10baseT_Full |
13842 ADVERTISED_TP;
13843 else
13844 adv |= ADVERTISED_FIBRE;
13845
13846 tp->link_config.advertising = adv;
Matt Carlsone7405222012-02-13 15:20:16 +000013847 tp->link_config.speed = SPEED_UNKNOWN;
13848 tp->link_config.duplex = DUPLEX_UNKNOWN;
Matt Carlsone256f8a2011-03-09 16:58:24 +000013849 tp->link_config.autoneg = AUTONEG_ENABLE;
Matt Carlsone7405222012-02-13 15:20:16 +000013850 tp->link_config.active_speed = SPEED_UNKNOWN;
13851 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
Matt Carlson34655ad2012-02-22 12:35:18 +000013852
13853 tp->old_link = -1;
Matt Carlsone256f8a2011-03-09 16:58:24 +000013854}
13855
/* Identify the PHY attached to this chip and initialize its link
 * configuration.  The PHY ID is taken from hardware registers when
 * safe, falling back to the value cached from EEPROM or, failing
 * that, the hardcoded subsystem-ID table.  Returns 0 on success or a
 * negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* With APE firmware present, each PCI function owns a distinct
	 * APE PHY lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* When phylib manages the PHY, delegate the whole probe to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Copper PHYs on these specific chips/revisions support EEE. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice: link status is latched-low per the
		 * MII spec, so the first read flushes any stale
		 * link-down indication.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autonegotiation if the PHY's advertisement
		 * registers do not already match our desired config.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here; appears to be a 5401 hardware
		 * workaround -- confirm before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13983
/* Populate tp->board_part_number (and, on Dell boards, append the
 * vendor firmware string to tp->fw_ver) from the device's PCI VPD
 * read-only section.  If no usable VPD is present, fall back to a
 * hardcoded part-number string keyed off the PCI device ID.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* Caller owns the returned buffer; freed at out_not_found. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource and bound-check its extent. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		/* "1028" is Dell's PCI vendor ID in ASCII; only Dell
		 * boards carry the VENDOR0 firmware-version field.
		 */
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	/* Board part number from the PN keyword. */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the PCI device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
14104
Matt Carlson9c8a6202007-10-21 16:16:08 -070014105static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14106{
14107 u32 val;
14108
Matt Carlsone4f34112009-02-25 14:25:00 +000014109 if (tg3_nvram_read(tp, offset, &val) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014110 (val & 0xfc000000) != 0x0c000000 ||
Matt Carlsone4f34112009-02-25 14:25:00 +000014111 tg3_nvram_read(tp, offset + 4, &val) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014112 val != 0)
14113 return 0;
14114
14115 return 1;
14116}
14117
/* Append the bootcode version to tp->fw_ver.  Newer images embed a
 * 16-byte version string reachable via a pointer at offset+8; older
 * images only provide major/minor fields in the NVM directory, which
 * are formatted as "vM.mm".
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* 0xc holds the bootcode image pointer, 0x4 its load address. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* The 0x0c000000 signature plus a zero second word marks the
	 * new-style image that carries an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Version text is appended after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte embedded string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image load address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14169
Matt Carlsona6f6cb12009-02-25 14:27:43 +000014170static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14171{
14172 u32 val, major, minor;
14173
14174 /* Use native endian representation */
14175 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14176 return;
14177
14178 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14179 TG3_NVM_HWSB_CFG1_MAJSFT;
14180 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14181 TG3_NVM_HWSB_CFG1_MINSFT;
14182
14183 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14184}
14185
Matt Carlsondfe00d72008-11-21 17:19:41 -080014186static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14187{
14188 u32 offset, major, minor, build;
14189
Matt Carlson75f99362010-04-05 10:19:24 +000014190 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
Matt Carlsondfe00d72008-11-21 17:19:41 -080014191
14192 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14193 return;
14194
14195 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14196 case TG3_EEPROM_SB_REVISION_0:
14197 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14198 break;
14199 case TG3_EEPROM_SB_REVISION_2:
14200 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14201 break;
14202 case TG3_EEPROM_SB_REVISION_3:
14203 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14204 break;
Matt Carlsona4153d42010-02-17 15:16:56 +000014205 case TG3_EEPROM_SB_REVISION_4:
14206 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14207 break;
14208 case TG3_EEPROM_SB_REVISION_5:
14209 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14210 break;
Matt Carlsonbba226a2010-10-14 10:37:38 +000014211 case TG3_EEPROM_SB_REVISION_6:
14212 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14213 break;
Matt Carlsondfe00d72008-11-21 17:19:41 -080014214 default:
14215 return;
14216 }
14217
Matt Carlsone4f34112009-02-25 14:25:00 +000014218 if (tg3_nvram_read(tp, offset, &val))
Matt Carlsondfe00d72008-11-21 17:19:41 -080014219 return;
14220
14221 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14222 TG3_EEPROM_SB_EDH_BLD_SHFT;
14223 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14224 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14225 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14226
14227 if (minor > 99 || build > 26)
14228 return;
14229
Matt Carlson75f99362010-04-05 10:19:24 +000014230 offset = strlen(tp->fw_ver);
14231 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14232 " v%d.%02d", major, minor);
Matt Carlsondfe00d72008-11-21 17:19:41 -080014233
14234 if (build > 0) {
Matt Carlson75f99362010-04-05 10:19:24 +000014235 offset = strlen(tp->fw_ver);
14236 if (offset < TG3_VER_SIZE - 1)
14237 tp->fw_ver[offset] = 'a' + build - 1;
Matt Carlsondfe00d72008-11-21 17:19:41 -080014238 }
14239}
14240
/* tg3_read_mgmtfw_ver - append the management (ASF) firmware version
 * string to tp->fw_ver.
 *
 * Scans the NVRAM directory for the ASF-init entry, translates the
 * image's version pointer into an NVRAM offset, then copies up to 16
 * bytes of version text (preceded by ", ") into tp->fw_ver.  Any NVRAM
 * read failure aborts silently, leaving fw_ver as-is.
 *
 * NOTE(review): the ',' and ' ' stores below are not bounds-checked;
 * presumably vlen is always small enough at this point -- confirm
 * against the callers.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Walk the NVRAM directory looking for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Directory exhausted without finding the entry. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed image load base; later chips record
	 * it in the word just before this directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* Fetch the image location, validate it, and read the version
	 * pointer stored 8 bytes into the image header.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* Translate the image-relative version pointer into an NVRAM
	 * offset.
	 */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to four 32-bit words of version text, truncating at
	 * the end of the fw_ver buffer.
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial copy when fewer than sizeof(v) bytes remain. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
14292
Michael Chan165f4d12012-07-16 16:23:59 +000014293static void __devinit tg3_probe_ncsi(struct tg3 *tp)
Matt Carlson7fd76442009-02-25 14:27:20 +000014294{
Matt Carlson7fd76442009-02-25 14:27:20 +000014295 u32 apedata;
Matt Carlson7fd76442009-02-25 14:27:20 +000014296
14297 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14298 if (apedata != APE_SEG_SIG_MAGIC)
14299 return;
14300
14301 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14302 if (!(apedata & APE_FW_STATUS_READY))
14303 return;
14304
Michael Chan165f4d12012-07-16 16:23:59 +000014305 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14306 tg3_flag_set(tp, APE_HAS_NCSI);
14307}
14308
14309static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14310{
14311 int vlen;
14312 u32 apedata;
14313 char *fwtype;
14314
Matt Carlson7fd76442009-02-25 14:27:20 +000014315 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14316
Michael Chan165f4d12012-07-16 16:23:59 +000014317 if (tg3_flag(tp, APE_HAS_NCSI))
Matt Carlsonecc79642010-08-02 11:26:01 +000014318 fwtype = "NCSI";
Michael Chan165f4d12012-07-16 16:23:59 +000014319 else
Matt Carlsonecc79642010-08-02 11:26:01 +000014320 fwtype = "DASH";
14321
Matt Carlson7fd76442009-02-25 14:27:20 +000014322 vlen = strlen(tp->fw_ver);
14323
Matt Carlsonecc79642010-08-02 11:26:01 +000014324 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14325 fwtype,
Matt Carlson7fd76442009-02-25 14:27:20 +000014326 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14327 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14328 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14329 (apedata & APE_FW_VERSION_BLDMSK));
14330}
14331
Matt Carlsonacd9c112009-02-25 14:26:33 +000014332static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14333{
14334 u32 val;
Matt Carlson75f99362010-04-05 10:19:24 +000014335 bool vpd_vers = false;
14336
14337 if (tp->fw_ver[0] != 0)
14338 vpd_vers = true;
Matt Carlsonacd9c112009-02-25 14:26:33 +000014339
Joe Perches63c3a662011-04-26 08:12:10 +000014340 if (tg3_flag(tp, NO_NVRAM)) {
Matt Carlson75f99362010-04-05 10:19:24 +000014341 strcat(tp->fw_ver, "sb");
Matt Carlsondf259d82009-04-20 06:57:14 +000014342 return;
14343 }
14344
Matt Carlsonacd9c112009-02-25 14:26:33 +000014345 if (tg3_nvram_read(tp, 0, &val))
14346 return;
14347
14348 if (val == TG3_EEPROM_MAGIC)
14349 tg3_read_bc_ver(tp);
14350 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14351 tg3_read_sb_ver(tp, val);
Matt Carlsona6f6cb12009-02-25 14:27:43 +000014352 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14353 tg3_read_hwsb_ver(tp);
Matt Carlsonacd9c112009-02-25 14:26:33 +000014354
Michael Chan165f4d12012-07-16 16:23:59 +000014355 if (tg3_flag(tp, ENABLE_ASF)) {
14356 if (tg3_flag(tp, ENABLE_APE)) {
14357 tg3_probe_ncsi(tp);
14358 if (!vpd_vers)
14359 tg3_read_dash_ver(tp);
14360 } else if (!vpd_vers) {
14361 tg3_read_mgmtfw_ver(tp);
14362 }
Matt Carlsonc9cab242011-07-13 09:27:27 +000014363 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070014364
14365 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080014366}
14367
Matt Carlson7cb32cf2010-09-30 10:34:36 +000014368static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14369{
Joe Perches63c3a662011-04-26 08:12:10 +000014370 if (tg3_flag(tp, LRG_PROD_RING_CAP))
Matt Carlsonde9f5232011-04-05 14:22:43 +000014371 return TG3_RX_RET_MAX_SIZE_5717;
Joe Perches63c3a662011-04-26 08:12:10 +000014372 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
Matt Carlsonde9f5232011-04-05 14:22:43 +000014373 return TG3_RX_RET_MAX_SIZE_5700;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000014374 else
Matt Carlsonde9f5232011-04-05 14:22:43 +000014375 return TG3_RX_RET_MAX_SIZE_5705;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000014376}
14377
/* PCI host bridges known to reorder posted memory writes.  When one of
 * these is present (and the NIC is not PCIe), mailbox writes must be
 * read back to force ordering -- see the MBOX_WRITE_REORDER handling
 * in tg3_get_invariants().
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
14384
Matt Carlson16c7fa72012-02-13 10:20:10 +000014385static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14386{
14387 struct pci_dev *peer;
14388 unsigned int func, devnr = tp->pdev->devfn & ~7;
14389
14390 for (func = 0; func < 8; func++) {
14391 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14392 if (peer && peer != tp->pdev)
14393 break;
14394 pci_dev_put(peer);
14395 }
14396 /* 5704 can be configured in single-port mode, set peer to
14397 * tp->pdev in that case.
14398 */
14399 if (!peer) {
14400 peer = tp->pdev;
14401 return peer;
14402 }
14403
14404 /*
14405 * We don't need to keep the refcount elevated; there's no way
14406 * to remove one half of this device without removing the other
14407 */
14408 pci_dev_put(peer);
14409
14410 return peer;
14411}
14412
/* tg3_detect_asic_rev - determine tp->pci_chip_rev_id and set the broad
 * chip-family feature flags derived from it (CPMU_PRESENT, 5717_PLUS,
 * 57765_CLASS, 57765_PLUS, 5755_PLUS, 5780_CLASS, 5750_PLUS,
 * 5705_PLUS).
 *
 * @misc_ctrl_reg: TG3PCI_MISC_HOST_CTRL value already read by the
 *                 caller; its upper bits normally hold the chip rev.
 *
 * NOTE: the flag-setting checks below are order-dependent -- later
 * checks read flags set by earlier ones.
 */
static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the config-space register that holds the real
		 * ASIC revision for this product family.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Remap 5717 C0 to the 5720 A0 chip id. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	/* 57765_PLUS is the union of the two families just flagged. */
	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
14493
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000014494static bool tg3_10_100_only_device(struct tg3 *tp,
14495 const struct pci_device_id *ent)
14496{
14497 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14498
14499 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14500 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14501 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14502 return true;
14503
14504 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14506 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14507 return true;
14508 } else {
14509 return true;
14510 }
14511 }
14512
14513 return false;
14514}
14515
14516static int __devinit tg3_get_invariants(struct tg3 *tp,
14517 const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -070014518{
Linus Torvalds1da177e2005-04-16 15:20:36 -070014519 u32 misc_ctrl_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014520 u32 pci_state_reg, grc_misc_cfg;
14521 u32 val;
14522 u16 pci_cmd;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014523 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014524
Linus Torvalds1da177e2005-04-16 15:20:36 -070014525 /* Force memory write invalidate off. If we leave it on,
14526 * then on 5700_BX chips we have to enable a workaround.
14527 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14528 * to match the cacheline size. The Broadcom driver have this
14529 * workaround but turns MWI off all the times so never uses
14530 * it. This seems to suggest that the workaround is insufficient.
14531 */
14532 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14533 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14534 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14535
Matt Carlson16821282011-07-13 09:27:28 +000014536 /* Important! -- Make sure register accesses are byteswapped
14537 * correctly. Also, for those chips that require it, make
14538 * sure that indirect register accesses are enabled before
14539 * the first operation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070014540 */
14541 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14542 &misc_ctrl_reg);
Matt Carlson16821282011-07-13 09:27:28 +000014543 tp->misc_host_ctrl |= (misc_ctrl_reg &
14544 MISC_HOST_CTRL_CHIPREV);
14545 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14546 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014547
Matt Carlson42b123b2012-02-13 15:20:13 +000014548 tg3_detect_asic_rev(tp, misc_ctrl_reg);
Michael Chanff645be2005-04-21 17:09:53 -070014549
Michael Chan68929142005-08-09 20:17:14 -070014550 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14551 * we need to disable memory and use config. cycles
14552 * only to access all registers. The 5702/03 chips
14553 * can mistakenly decode the special cycles from the
14554 * ICH chipsets as memory write cycles, causing corruption
14555 * of register and memory space. Only certain ICH bridges
14556 * will drive special cycles with non-zero data during the
14557 * address phase which can fall within the 5703's address
14558 * range. This is not an ICH bug as the PCI spec allows
14559 * non-zero address during special cycles. However, only
14560 * these ICH bridges are known to drive non-zero addresses
14561 * during special cycles.
14562 *
14563 * Since special cycles do not cross PCI bridges, we only
14564 * enable this workaround if the 5703 is on the secondary
14565 * bus of these ICH bridges.
14566 */
14567 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14568 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14569 static struct tg3_dev_id {
14570 u32 vendor;
14571 u32 device;
14572 u32 rev;
14573 } ich_chipsets[] = {
14574 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14575 PCI_ANY_ID },
14576 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14577 PCI_ANY_ID },
14578 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14579 0xa },
14580 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14581 PCI_ANY_ID },
14582 { },
14583 };
14584 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14585 struct pci_dev *bridge = NULL;
14586
14587 while (pci_id->vendor != 0) {
14588 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14589 bridge);
14590 if (!bridge) {
14591 pci_id++;
14592 continue;
14593 }
14594 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070014595 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070014596 continue;
14597 }
14598 if (bridge->subordinate &&
14599 (bridge->subordinate->number ==
14600 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014601 tg3_flag_set(tp, ICH_WORKAROUND);
Michael Chan68929142005-08-09 20:17:14 -070014602 pci_dev_put(bridge);
14603 break;
14604 }
14605 }
14606 }
14607
Matt Carlson6ff6f812011-05-19 12:12:54 +000014608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Matt Carlson41588ba2008-04-19 18:12:33 -070014609 static struct tg3_dev_id {
14610 u32 vendor;
14611 u32 device;
14612 } bridge_chipsets[] = {
14613 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14614 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14615 { },
14616 };
14617 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14618 struct pci_dev *bridge = NULL;
14619
14620 while (pci_id->vendor != 0) {
14621 bridge = pci_get_device(pci_id->vendor,
14622 pci_id->device,
14623 bridge);
14624 if (!bridge) {
14625 pci_id++;
14626 continue;
14627 }
14628 if (bridge->subordinate &&
14629 (bridge->subordinate->number <=
14630 tp->pdev->bus->number) &&
Yinghai Lub918c622012-05-17 18:51:11 -070014631 (bridge->subordinate->busn_res.end >=
Matt Carlson41588ba2008-04-19 18:12:33 -070014632 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014633 tg3_flag_set(tp, 5701_DMA_BUG);
Matt Carlson41588ba2008-04-19 18:12:33 -070014634 pci_dev_put(bridge);
14635 break;
14636 }
14637 }
14638 }
14639
Michael Chan4a29cc22006-03-19 13:21:12 -080014640 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14641 * DMA addresses > 40-bit. This bridge may have other additional
14642 * 57xx devices behind it in some 4-port NIC designs for example.
14643 * Any tg3 device found behind the bridge will also need the 40-bit
14644 * DMA workaround.
14645 */
Matt Carlson42b123b2012-02-13 15:20:13 +000014646 if (tg3_flag(tp, 5780_CLASS)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014647 tg3_flag_set(tp, 40BIT_DMA_BUG);
Michael Chan4cf78e42005-07-25 12:29:19 -070014648 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Matt Carlson859a588792010-04-05 10:19:28 +000014649 } else {
Michael Chan4a29cc22006-03-19 13:21:12 -080014650 struct pci_dev *bridge = NULL;
14651
14652 do {
14653 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14654 PCI_DEVICE_ID_SERVERWORKS_EPB,
14655 bridge);
14656 if (bridge && bridge->subordinate &&
14657 (bridge->subordinate->number <=
14658 tp->pdev->bus->number) &&
Yinghai Lub918c622012-05-17 18:51:11 -070014659 (bridge->subordinate->busn_res.end >=
Michael Chan4a29cc22006-03-19 13:21:12 -080014660 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000014661 tg3_flag_set(tp, 40BIT_DMA_BUG);
Michael Chan4a29cc22006-03-19 13:21:12 -080014662 pci_dev_put(bridge);
14663 break;
14664 }
14665 } while (bridge);
14666 }
Michael Chan4cf78e42005-07-25 12:29:19 -070014667
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000014668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
Matt Carlson3a1e19d2011-07-13 09:27:32 +000014669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
Michael Chan7544b092007-05-05 13:08:32 -070014670 tp->pdev_peer = tg3_find_peer(tp);
14671
Matt Carlson507399f2009-11-13 13:03:37 +000014672 /* Determine TSO capabilities */
Matt Carlsona0512942011-07-27 14:20:54 +000014673 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
Matt Carlson4d163b72011-01-25 15:58:48 +000014674 ; /* Do nothing. HW bug. */
Joe Perches63c3a662011-04-26 08:12:10 +000014675 else if (tg3_flag(tp, 57765_PLUS))
14676 tg3_flag_set(tp, HW_TSO_3);
14677 else if (tg3_flag(tp, 5755_PLUS) ||
Matt Carlsone849cdc2009-11-13 13:03:38 +000014678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Joe Perches63c3a662011-04-26 08:12:10 +000014679 tg3_flag_set(tp, HW_TSO_2);
14680 else if (tg3_flag(tp, 5750_PLUS)) {
14681 tg3_flag_set(tp, HW_TSO_1);
14682 tg3_flag_set(tp, TSO_BUG);
Matt Carlson507399f2009-11-13 13:03:37 +000014683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14684 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Joe Perches63c3a662011-04-26 08:12:10 +000014685 tg3_flag_clear(tp, TSO_BUG);
Matt Carlson507399f2009-11-13 13:03:37 +000014686 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14687 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14688 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +000014689 tg3_flag_set(tp, TSO_BUG);
Matt Carlson507399f2009-11-13 13:03:37 +000014690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14691 tp->fw_needed = FIRMWARE_TG3TSO5;
14692 else
14693 tp->fw_needed = FIRMWARE_TG3TSO;
14694 }
14695
Matt Carlsondabc5c62011-05-19 12:12:52 +000014696 /* Selectively allow TSO based on operating conditions */
Matt Carlson6ff6f812011-05-19 12:12:54 +000014697 if (tg3_flag(tp, HW_TSO_1) ||
14698 tg3_flag(tp, HW_TSO_2) ||
14699 tg3_flag(tp, HW_TSO_3) ||
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000014700 tp->fw_needed) {
14701 /* For firmware TSO, assume ASF is disabled.
14702 * We'll disable TSO later if we discover ASF
14703 * is enabled in tg3_get_eeprom_hw_cfg().
14704 */
Matt Carlsondabc5c62011-05-19 12:12:52 +000014705 tg3_flag_set(tp, TSO_CAPABLE);
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000014706 } else {
Matt Carlsondabc5c62011-05-19 12:12:52 +000014707 tg3_flag_clear(tp, TSO_CAPABLE);
14708 tg3_flag_clear(tp, TSO_BUG);
14709 tp->fw_needed = NULL;
14710 }
14711
14712 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14713 tp->fw_needed = FIRMWARE_TG3;
14714
Matt Carlson507399f2009-11-13 13:03:37 +000014715 tp->irq_max = 1;
14716
Joe Perches63c3a662011-04-26 08:12:10 +000014717 if (tg3_flag(tp, 5750_PLUS)) {
14718 tg3_flag_set(tp, SUPPORT_MSI);
Michael Chan7544b092007-05-05 13:08:32 -070014719 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14720 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14721 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14722 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14723 tp->pdev_peer == tp->pdev))
Joe Perches63c3a662011-04-26 08:12:10 +000014724 tg3_flag_clear(tp, SUPPORT_MSI);
Michael Chan7544b092007-05-05 13:08:32 -070014725
Joe Perches63c3a662011-04-26 08:12:10 +000014726 if (tg3_flag(tp, 5755_PLUS) ||
Michael Chanb5d37722006-09-27 16:06:21 -070014727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Joe Perches63c3a662011-04-26 08:12:10 +000014728 tg3_flag_set(tp, 1SHOT_MSI);
Michael Chan52c0fd82006-06-29 20:15:54 -070014729 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070014730
Joe Perches63c3a662011-04-26 08:12:10 +000014731 if (tg3_flag(tp, 57765_PLUS)) {
14732 tg3_flag_set(tp, SUPPORT_MSIX);
Matt Carlson507399f2009-11-13 13:03:37 +000014733 tp->irq_max = TG3_IRQ_MAX_VECS;
14734 }
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000014735 }
Matt Carlson0e1406d2009-11-02 12:33:33 +000014736
Michael Chan91024262012-09-28 07:12:38 +000014737 tp->txq_max = 1;
14738 tp->rxq_max = 1;
14739 if (tp->irq_max > 1) {
14740 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14741 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14742
14743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14745 tp->txq_max = tp->irq_max - 1;
14746 }
14747
Matt Carlsonb7abee62012-06-07 12:56:54 +000014748 if (tg3_flag(tp, 5755_PLUS) ||
14749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Joe Perches63c3a662011-04-26 08:12:10 +000014750 tg3_flag_set(tp, SHORT_DMA_BUG);
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000014751
Matt Carlsone31aa982011-07-27 14:20:53 +000014752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
Matt Carlsona4cb4282011-12-14 11:09:58 +000014753 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
Matt Carlsone31aa982011-07-27 14:20:53 +000014754
Matt Carlsonfa6b2aa2011-11-21 15:01:19 +000014755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
Joe Perches63c3a662011-04-26 08:12:10 +000014758 tg3_flag_set(tp, LRG_PROD_RING_CAP);
Matt Carlsonde9f5232011-04-05 14:22:43 +000014759
Joe Perches63c3a662011-04-26 08:12:10 +000014760 if (tg3_flag(tp, 57765_PLUS) &&
Matt Carlsona0512942011-07-27 14:20:54 +000014761 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
Joe Perches63c3a662011-04-26 08:12:10 +000014762 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
Matt Carlsonb703df62009-12-03 08:36:21 +000014763
Joe Perches63c3a662011-04-26 08:12:10 +000014764 if (!tg3_flag(tp, 5705_PLUS) ||
14765 tg3_flag(tp, 5780_CLASS) ||
14766 tg3_flag(tp, USE_JUMBO_BDFLAG))
14767 tg3_flag_set(tp, JUMBO_CAPABLE);
Michael Chan0f893dc2005-07-25 12:30:38 -070014768
Matt Carlson52f44902008-11-21 17:17:04 -080014769 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14770 &pci_state_reg);
14771
Jon Mason708ebb32011-06-27 12:56:50 +000014772 if (pci_is_pcie(tp->pdev)) {
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014773 u16 lnkctl;
14774
Joe Perches63c3a662011-04-26 08:12:10 +000014775 tg3_flag_set(tp, PCI_EXPRESS);
Matt Carlson5f5c51e2007-11-12 21:19:37 -080014776
Jiang Liu0f49bfb2012-08-20 13:28:20 -060014777 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014778 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
Matt Carlson7196cd62011-05-19 16:02:44 +000014779 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14780 ASIC_REV_5906) {
Joe Perches63c3a662011-04-26 08:12:10 +000014781 tg3_flag_clear(tp, HW_TSO_2);
Matt Carlsondabc5c62011-05-19 12:12:52 +000014782 tg3_flag_clear(tp, TSO_CAPABLE);
Matt Carlson7196cd62011-05-19 16:02:44 +000014783 }
Matt Carlson5e7dfd02008-11-21 17:18:16 -080014784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson321d32a2008-11-21 17:22:19 -080014785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson9cf74eb2009-04-20 06:58:27 +000014786 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14787 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
Joe Perches63c3a662011-04-26 08:12:10 +000014788 tg3_flag_set(tp, CLKREQ_BUG);
Matt Carlson614b0592010-01-20 16:58:02 +000014789 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +000014790 tg3_flag_set(tp, L1PLLPD_EN);
Michael Chanc7835a72006-11-15 21:14:42 -080014791 }
Matt Carlson52f44902008-11-21 17:17:04 -080014792 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Jon Mason708ebb32011-06-27 12:56:50 +000014793 /* BCM5785 devices are effectively PCIe devices, and should
14794 * follow PCIe codepaths, but do not have a PCIe capabilities
14795 * section.
Matt Carlson93a700a2011-08-31 11:44:54 +000014796 */
Joe Perches63c3a662011-04-26 08:12:10 +000014797 tg3_flag_set(tp, PCI_EXPRESS);
14798 } else if (!tg3_flag(tp, 5705_PLUS) ||
14799 tg3_flag(tp, 5780_CLASS)) {
Matt Carlson52f44902008-11-21 17:17:04 -080014800 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14801 if (!tp->pcix_cap) {
Matt Carlson2445e462010-04-05 10:19:21 +000014802 dev_err(&tp->pdev->dev,
14803 "Cannot find PCI-X capability, aborting\n");
Matt Carlson52f44902008-11-21 17:17:04 -080014804 return -EIO;
14805 }
14806
14807 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
Joe Perches63c3a662011-04-26 08:12:10 +000014808 tg3_flag_set(tp, PCIX_MODE);
Matt Carlson52f44902008-11-21 17:17:04 -080014809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070014810
Michael Chan399de502005-10-03 14:02:39 -070014811 /* If we have an AMD 762 or VIA K8T800 chipset, write
14812 * reordering to the mailbox registers done by the host
14813 * controller can cause major troubles. We read back from
14814 * every mailbox register write to force the writes to be
14815 * posted to the chip in order.
14816 */
Matt Carlson41434702011-03-09 16:58:22 +000014817 if (pci_dev_present(tg3_write_reorder_chipsets) &&
Joe Perches63c3a662011-04-26 08:12:10 +000014818 !tg3_flag(tp, PCI_EXPRESS))
14819 tg3_flag_set(tp, MBOX_WRITE_REORDER);
Michael Chan399de502005-10-03 14:02:39 -070014820
Matt Carlson69fc4052008-12-21 20:19:57 -080014821 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14822 &tp->pci_cacheline_sz);
14823 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14824 &tp->pci_lat_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14826 tp->pci_lat_timer < 64) {
14827 tp->pci_lat_timer = 64;
Matt Carlson69fc4052008-12-21 20:19:57 -080014828 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14829 tp->pci_lat_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014830 }
14831
Matt Carlson16821282011-07-13 09:27:28 +000014832 /* Important! -- It is critical that the PCI-X hw workaround
14833 * situation is decided before the first MMIO register access.
14834 */
Matt Carlson52f44902008-11-21 17:17:04 -080014835 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14836 /* 5700 BX chips need to have their TX producer index
14837 * mailboxes written twice to workaround a bug.
14838 */
Joe Perches63c3a662011-04-26 08:12:10 +000014839 tg3_flag_set(tp, TXD_MBOX_HWBUG);
Matt Carlson9974a352007-10-07 23:27:28 -070014840
Matt Carlson52f44902008-11-21 17:17:04 -080014841 /* If we are in PCI-X mode, enable register write workaround.
Linus Torvalds1da177e2005-04-16 15:20:36 -070014842 *
14843 * The workaround is to use indirect register accesses
14844 * for all chip writes not to mailbox registers.
14845 */
Joe Perches63c3a662011-04-26 08:12:10 +000014846 if (tg3_flag(tp, PCIX_MODE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070014847 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014848
Joe Perches63c3a662011-04-26 08:12:10 +000014849 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014850
14851 /* The chip can have it's power management PCI config
14852 * space registers clobbered due to this bug.
14853 * So explicitly force the chip into D0 here.
14854 */
Matt Carlson9974a352007-10-07 23:27:28 -070014855 pci_read_config_dword(tp->pdev,
14856 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070014857 &pm_reg);
14858 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14859 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070014860 pci_write_config_dword(tp->pdev,
14861 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070014862 pm_reg);
14863
14864 /* Also, force SERR#/PERR# in PCI command. */
14865 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14866 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14867 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14868 }
14869 }
14870
Linus Torvalds1da177e2005-04-16 15:20:36 -070014871 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
Joe Perches63c3a662011-04-26 08:12:10 +000014872 tg3_flag_set(tp, PCI_HIGH_SPEED);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014873 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
Joe Perches63c3a662011-04-26 08:12:10 +000014874 tg3_flag_set(tp, PCI_32BIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -070014875
14876 /* Chip-specific fixup from Broadcom driver */
14877 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14878 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14879 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14880 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14881 }
14882
Michael Chan1ee582d2005-08-09 20:16:46 -070014883 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070014884 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070014885 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070014886 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070014887 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070014888 tp->write32_tx_mbox = tg3_write32;
14889 tp->write32_rx_mbox = tg3_write32;
14890
14891 /* Various workaround register access methods */
Joe Perches63c3a662011-04-26 08:12:10 +000014892 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
Michael Chan1ee582d2005-08-09 20:16:46 -070014893 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070014894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
Joe Perches63c3a662011-04-26 08:12:10 +000014895 (tg3_flag(tp, PCI_EXPRESS) &&
Matt Carlson98efd8a2007-05-05 12:47:25 -070014896 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14897 /*
14898 * Back to back register writes can cause problems on these
14899 * chips, the workaround is to read back all reg writes
14900 * except those to mailbox regs.
14901 *
14902 * See tg3_write_indirect_reg32().
14903 */
Michael Chan1ee582d2005-08-09 20:16:46 -070014904 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070014905 }
14906
Joe Perches63c3a662011-04-26 08:12:10 +000014907 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
Michael Chan1ee582d2005-08-09 20:16:46 -070014908 tp->write32_tx_mbox = tg3_write32_tx_mbox;
Joe Perches63c3a662011-04-26 08:12:10 +000014909 if (tg3_flag(tp, MBOX_WRITE_REORDER))
Michael Chan1ee582d2005-08-09 20:16:46 -070014910 tp->write32_rx_mbox = tg3_write_flush_reg32;
14911 }
Michael Chan20094932005-08-09 20:16:32 -070014912
Joe Perches63c3a662011-04-26 08:12:10 +000014913 if (tg3_flag(tp, ICH_WORKAROUND)) {
Michael Chan68929142005-08-09 20:17:14 -070014914 tp->read32 = tg3_read_indirect_reg32;
14915 tp->write32 = tg3_write_indirect_reg32;
14916 tp->read32_mbox = tg3_read_indirect_mbox;
14917 tp->write32_mbox = tg3_write_indirect_mbox;
14918 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14919 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14920
14921 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070014922 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070014923
14924 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14925 pci_cmd &= ~PCI_COMMAND_MEMORY;
14926 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14927 }
Michael Chanb5d37722006-09-27 16:06:21 -070014928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14929 tp->read32_mbox = tg3_read32_mbox_5906;
14930 tp->write32_mbox = tg3_write32_mbox_5906;
14931 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14932 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14933 }
Michael Chan68929142005-08-09 20:17:14 -070014934
Michael Chanbbadf502006-04-06 21:46:34 -070014935 if (tp->write32 == tg3_write_indirect_reg32 ||
Joe Perches63c3a662011-04-26 08:12:10 +000014936 (tg3_flag(tp, PCIX_MODE) &&
Michael Chanbbadf502006-04-06 21:46:34 -070014937 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070014938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Joe Perches63c3a662011-04-26 08:12:10 +000014939 tg3_flag_set(tp, SRAM_USE_CONFIG);
Michael Chanbbadf502006-04-06 21:46:34 -070014940
Matt Carlson16821282011-07-13 09:27:28 +000014941 /* The memory arbiter has to be enabled in order for SRAM accesses
14942 * to succeed. Normally on powerup the tg3 chip firmware will make
14943 * sure it is enabled, but other entities such as system netboot
14944 * code might disable it.
14945 */
14946 val = tr32(MEMARB_MODE);
14947 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14948
Matt Carlson9dc5e342011-11-04 09:15:02 +000014949 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14951 tg3_flag(tp, 5780_CLASS)) {
14952 if (tg3_flag(tp, PCIX_MODE)) {
14953 pci_read_config_dword(tp->pdev,
14954 tp->pcix_cap + PCI_X_STATUS,
14955 &val);
14956 tp->pci_fn = val & 0x7;
14957 }
14958 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14959 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14960 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14961 NIC_SRAM_CPMUSTAT_SIG) {
14962 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14963 tp->pci_fn = tp->pci_fn ? 1 : 0;
14964 }
14965 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14967 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14968 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14969 NIC_SRAM_CPMUSTAT_SIG) {
14970 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14971 TG3_CPMU_STATUS_FSHFT_5719;
14972 }
Matt Carlson69f11c92011-07-13 09:27:30 +000014973 }
14974
Michael Chan7d0c41e2005-04-21 17:06:20 -070014975 /* Get eeprom hw config before calling tg3_set_power_state().
Joe Perches63c3a662011-04-26 08:12:10 +000014976 * In particular, the TG3_FLAG_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070014977 * determined before calling tg3_set_power_state() so that
14978 * we know whether or not to switch out of Vaux power.
14979 * When the flag is set, it means that GPIO1 is used for eeprom
14980 * write protect and also implies that it is a LOM where GPIOs
14981 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040014982 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070014983 tg3_get_eeprom_hw_cfg(tp);
14984
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000014985 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14986 tg3_flag_clear(tp, TSO_CAPABLE);
14987 tg3_flag_clear(tp, TSO_BUG);
14988 tp->fw_needed = NULL;
14989 }
14990
Joe Perches63c3a662011-04-26 08:12:10 +000014991 if (tg3_flag(tp, ENABLE_APE)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070014992 /* Allow reads and writes to the
14993 * APE register and memory space.
14994 */
14995 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
Matt Carlsonf92d9dc12010-06-05 17:24:30 +000014996 PCISTATE_ALLOW_APE_SHMEM_WR |
14997 PCISTATE_ALLOW_APE_PSPACE_WR;
Matt Carlson0d3031d2007-10-10 18:02:43 -070014998 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14999 pci_state_reg);
Matt Carlsonc9cab242011-07-13 09:27:27 +000015000
15001 tg3_ape_lock_init(tp);
Matt Carlson0d3031d2007-10-10 18:02:43 -070015002 }
15003
Matt Carlson16821282011-07-13 09:27:28 +000015004 /* Set up tp->grc_local_ctrl before calling
15005 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15006 * will bring 5700's external PHY out of reset.
Michael Chan314fba32005-04-21 17:07:04 -070015007 * It is also used as eeprom write protect on LOMs.
15008 */
15009 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
Matt Carlson6ff6f812011-05-19 12:12:54 +000015010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
Joe Perches63c3a662011-04-26 08:12:10 +000015011 tg3_flag(tp, EEPROM_WRITE_PROT))
Michael Chan314fba32005-04-21 17:07:04 -070015012 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15013 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070015014 /* Unused GPIO3 must be driven as output on 5752 because there
15015 * are no pull-up resistors on unused GPIO pins.
15016 */
15017 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15018 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070015019
Matt Carlson321d32a2008-11-21 17:22:19 -080015020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsoncb4ed1f2010-01-20 16:58:09 +000015021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
Matt Carlson55086ad2011-12-14 11:09:59 +000015022 tg3_flag(tp, 57765_CLASS))
Michael Chanaf36e6b2006-03-23 01:28:06 -080015023 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15024
Matt Carlson8d519ab2009-04-20 06:58:01 +000015025 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15026 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
Matt Carlson5f0c4a32008-06-09 15:41:12 -070015027 /* Turn off the debug UART. */
15028 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
Joe Perches63c3a662011-04-26 08:12:10 +000015029 if (tg3_flag(tp, IS_NIC))
Matt Carlson5f0c4a32008-06-09 15:41:12 -070015030 /* Keep VMain power. */
15031 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15032 GRC_LCLCTRL_GPIO_OUTPUT0;
15033 }
15034
Matt Carlson16821282011-07-13 09:27:28 +000015035 /* Switch out of Vaux if it is a NIC */
15036 tg3_pwrsrc_switch_to_vmain(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015037
Linus Torvalds1da177e2005-04-16 15:20:36 -070015038 /* Derive initial jumbo mode from MTU assigned in
15039 * ether_setup() via the alloc_etherdev() call
15040 */
Joe Perches63c3a662011-04-26 08:12:10 +000015041 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15042 tg3_flag_set(tp, JUMBO_RING_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015043
15044 /* Determine WakeOnLan speed to use. */
15045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15046 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15047 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15048 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
Joe Perches63c3a662011-04-26 08:12:10 +000015049 tg3_flag_clear(tp, WOL_SPEED_100MB);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015050 } else {
Joe Perches63c3a662011-04-26 08:12:10 +000015051 tg3_flag_set(tp, WOL_SPEED_100MB);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015052 }
15053
Matt Carlson7f97a4b2009-08-25 10:10:03 +000015054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015055 tp->phy_flags |= TG3_PHYFLG_IS_FET;
Matt Carlson7f97a4b2009-08-25 10:10:03 +000015056
Linus Torvalds1da177e2005-04-16 15:20:36 -070015057 /* A few boards don't want Ethernet@WireSpeed phy feature */
Matt Carlson6ff6f812011-05-19 12:12:54 +000015058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15059 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070015060 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070015061 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015062 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15063 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15064 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015065
15066 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15067 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015068 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015069 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015070 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015071
Joe Perches63c3a662011-04-26 08:12:10 +000015072 if (tg3_flag(tp, 5705_PLUS) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015073 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
Matt Carlson321d32a2008-11-21 17:22:19 -080015074 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000015075 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015076 !tg3_flag(tp, 57765_PLUS)) {
Michael Chanc424cb22006-04-29 18:56:34 -070015077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070015078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070015079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080015081 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15082 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015083 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080015084 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015085 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
Matt Carlson321d32a2008-11-21 17:22:19 -080015086 } else
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015087 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
Michael Chanc424cb22006-04-29 18:56:34 -070015088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015089
Matt Carlsonb2a5c192008-04-03 21:44:44 -070015090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15091 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15092 tp->phy_otp = tg3_read_otp_phycfg(tp);
15093 if (tp->phy_otp == 0)
15094 tp->phy_otp = TG3_OTP_DEFAULT;
15095 }
15096
Joe Perches63c3a662011-04-26 08:12:10 +000015097 if (tg3_flag(tp, CPMU_PRESENT))
Matt Carlson8ef21422008-05-02 16:47:53 -070015098 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15099 else
15100 tp->mi_mode = MAC_MI_MODE_BASE;
15101
Linus Torvalds1da177e2005-04-16 15:20:36 -070015102 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015103 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15104 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15105 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15106
Matt Carlson4d958472011-04-20 07:57:35 +000015107 /* Set these bits to enable statistics workaround. */
15108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15109 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15110 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15111 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15112 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15113 }
15114
Matt Carlson321d32a2008-11-21 17:22:19 -080015115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
Joe Perches63c3a662011-04-26 08:12:10 +000015117 tg3_flag_set(tp, USE_PHYLIB);
Matt Carlson57e69832008-05-25 23:48:31 -070015118
Matt Carlson158d7ab2008-05-29 01:37:54 -070015119 err = tg3_mdio_init(tp);
15120 if (err)
15121 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015122
15123 /* Initialize data/descriptor byte/word swapping. */
15124 val = tr32(GRC_MODE);
Matt Carlsonf2096f92011-04-05 14:22:48 +000015125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15126 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15127 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15128 GRC_MODE_B2HRX_ENABLE |
15129 GRC_MODE_HTX2B_ENABLE |
15130 GRC_MODE_HOST_STACKUP);
15131 else
15132 val &= GRC_MODE_HOST_STACKUP;
15133
Linus Torvalds1da177e2005-04-16 15:20:36 -070015134 tw32(GRC_MODE, val | tp->grc_mode);
15135
15136 tg3_switch_clocks(tp);
15137
15138 /* Clear this out for sanity. */
15139 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15140
15141 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15142 &pci_state_reg);
15143 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015144 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070015145 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15146
15147 if (chiprevid == CHIPREV_ID_5701_A0 ||
15148 chiprevid == CHIPREV_ID_5701_B0 ||
15149 chiprevid == CHIPREV_ID_5701_B2 ||
15150 chiprevid == CHIPREV_ID_5701_B5) {
15151 void __iomem *sram_base;
15152
15153 /* Write some dummy words into the SRAM status block
15154 * area, see if it reads back correctly. If the return
15155 * value is bad, force enable the PCIX workaround.
15156 */
15157 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15158
15159 writel(0x00000000, sram_base);
15160 writel(0x00000000, sram_base + 4);
15161 writel(0xffffffff, sram_base + 4);
15162 if (readl(sram_base) != 0x00000000)
Joe Perches63c3a662011-04-26 08:12:10 +000015163 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015164 }
15165 }
15166
15167 udelay(50);
15168 tg3_nvram_init(tp);
15169
15170 grc_misc_cfg = tr32(GRC_MISC_CFG);
15171 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15172
Linus Torvalds1da177e2005-04-16 15:20:36 -070015173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15174 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15175 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
Joe Perches63c3a662011-04-26 08:12:10 +000015176 tg3_flag_set(tp, IS_5788);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015177
Joe Perches63c3a662011-04-26 08:12:10 +000015178 if (!tg3_flag(tp, IS_5788) &&
Matt Carlson6ff6f812011-05-19 12:12:54 +000015179 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
Joe Perches63c3a662011-04-26 08:12:10 +000015180 tg3_flag_set(tp, TAGGED_STATUS);
15181 if (tg3_flag(tp, TAGGED_STATUS)) {
David S. Millerfac9b832005-05-18 22:46:34 -070015182 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15183 HOSTCC_MODE_CLRTICK_TXBD);
15184
15185 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15186 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15187 tp->misc_host_ctrl);
15188 }
15189
Matt Carlson3bda1252008-08-15 14:08:22 -070015190 /* Preserve the APE MAC_MODE bits */
Joe Perches63c3a662011-04-26 08:12:10 +000015191 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsond2394e6b2010-11-24 08:31:47 +000015192 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
Matt Carlson3bda1252008-08-15 14:08:22 -070015193 else
Matt Carlson6e01b202011-08-19 13:58:20 +000015194 tp->mac_mode = 0;
Matt Carlson3bda1252008-08-15 14:08:22 -070015195
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000015196 if (tg3_10_100_only_device(tp, ent))
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015197 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015198
15199 err = tg3_phy_probe(tp);
15200 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000015201 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015202 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070015203 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015204 }
15205
Matt Carlson184b8902010-04-05 10:19:25 +000015206 tg3_read_vpd(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080015207 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015208
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015209 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15210 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015211 } else {
15212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015213 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015214 else
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015215 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015216 }
15217
15218 /* 5700 {AX,BX} chips have a broken status block link
15219 * change bit implementation, so we must use the
15220 * status register in those cases.
15221 */
15222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
Joe Perches63c3a662011-04-26 08:12:10 +000015223 tg3_flag_set(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015224 else
Joe Perches63c3a662011-04-26 08:12:10 +000015225 tg3_flag_clear(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015226
15227 /* The led_ctrl is set during tg3_phy_probe, here we might
15228 * have to force the link status polling mechanism based
15229 * upon subsystem IDs.
15230 */
15231 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070015232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015233 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15234 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
Joe Perches63c3a662011-04-26 08:12:10 +000015235 tg3_flag_set(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015236 }
15237
15238 /* For all SERDES we poll the MAC status register. */
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015239 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Joe Perches63c3a662011-04-26 08:12:10 +000015240 tg3_flag_set(tp, POLL_SERDES);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015241 else
Joe Perches63c3a662011-04-26 08:12:10 +000015242 tg3_flag_clear(tp, POLL_SERDES);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015243
Eric Dumazet9205fd92011-11-18 06:47:01 +000015244 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
Matt Carlsond2757fc2010-04-12 06:58:27 +000015245 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015247 tg3_flag(tp, PCIX_MODE)) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000015248 tp->rx_offset = NET_SKB_PAD;
Matt Carlsond2757fc2010-04-12 06:58:27 +000015249#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
Matt Carlson9dc7a112010-04-12 06:58:28 +000015250 tp->rx_copy_thresh = ~(u16)0;
Matt Carlsond2757fc2010-04-12 06:58:27 +000015251#endif
15252 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015253
Matt Carlson2c49a442010-09-30 10:34:35 +000015254 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15255 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000015256 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15257
Matt Carlson2c49a442010-09-30 10:34:35 +000015258 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
Michael Chanf92905d2006-06-29 20:14:29 -070015259
15260 /* Increment the rx prod index on the rx std ring by at most
15261 * 8 for these chips to workaround hw errata.
15262 */
15263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15266 tp->rx_std_max_post = 8;
15267
Joe Perches63c3a662011-04-26 08:12:10 +000015268 if (tg3_flag(tp, ASPM_WORKAROUND))
Matt Carlson8ed5d972007-05-07 00:25:49 -070015269 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15270 PCIE_PWR_MGMT_L1_THRESH_MSK;
15271
Linus Torvalds1da177e2005-04-16 15:20:36 -070015272 return err;
15273}
15274
David S. Miller49b6e95f2007-03-29 01:38:42 -070015275#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070015276static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15277{
15278 struct net_device *dev = tp->dev;
15279 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070015280 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070015281 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070015282 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015283
David S. Miller49b6e95f2007-03-29 01:38:42 -070015284 addr = of_get_property(dp, "local-mac-address", &len);
15285 if (addr && len == 6) {
15286 memcpy(dev->dev_addr, addr, 6);
15287 memcpy(dev->perm_addr, dev->dev_addr, 6);
15288 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015289 }
15290 return -ENODEV;
15291}
15292
15293static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15294{
15295 struct net_device *dev = tp->dev;
15296
15297 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070015298 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015299 return 0;
15300}
15301#endif
15302
/* Determine the device's MAC address, trying sources in order:
 *   1. OpenFirmware "local-mac-address" property (SPARC only).
 *   2. The NIC SRAM MAC address mailbox.
 *   3. NVRAM at a chip-specific offset.
 *   4. The MAC_ADDR_0 hardware registers.
 *   5. The SPARC IDPROM as a last resort (SPARC only).
 *
 * On success the address is stored in dev->dev_addr and mirrored to
 * dev->perm_addr; returns 0.  Returns -EINVAL if no valid (unicast,
 * non-zero) address could be obtained.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address; adjusted below for
	 * chips that store it elsewhere or expose multiple functions.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts: the second MAC's address lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Reset the NVRAM command block; if we could not take the
		 * NVRAM lock, issue the reset directly, otherwise just
		 * drop the lock again.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717+ multi-function devices: pick the per-function slot. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b in the top half appears to be an ASCII "HK" signature
	 * written by bootcode to mark a valid address -- TODO confirm.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* NVRAM words are big-endian: the address is the
			 * low 2 bytes of hi followed by all 4 bytes of lo.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
15378
David S. Miller59e6b432005-05-18 22:50:10 -070015379#define BOUNDARY_SINGLE_CACHELINE 1
15380#define BOUNDARY_MULTI_CACHELINE 2
15381
/* Compute the DMA read/write boundary bits to merge into the
 * DMA_RWCTRL register value @val, based on the PCI cache line size
 * and the bus type (PCI, PCI-X, or PCI Express).
 *
 * The "goal" is chosen per architecture: some platforms (PPC64, IA64,
 * PARISC) prefer bursts bounded at multiples of a cache line, others
 * (SPARC64, ALPHA) at a single cache line, and the rest impose no
 * boundary at all.  Returns the updated @val.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 4-byte dwords; 0 means
	 * the register was never programmed, so assume the maximum.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ chips only have a cache-alignment disable bit. */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-X: only 128 and 384 byte boundaries exist. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E: only the write boundary is controllable,
		 * at either 64 or 128 bytes.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: boundaries from 16 up to 1024 bytes.
		 * For the single-cache-line goal each case matches the
		 * exact line size; otherwise fall through to the next
		 * larger boundary, capped at 256.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
15522
/* Perform one host<->NIC DMA transfer of @size bytes and poll for its
 * completion.  A single internal buffer descriptor is hand-written into
 * NIC SRAM through the PCI memory window, then queued on the high-priority
 * DMA FIFO.
 *
 * @tp:        driver/device state
 * @buf:       kernel virtual address of the coherent test buffer
 * @buf_dma:   bus (DMA) address of the same buffer
 * @size:      transfer length in bytes
 * @to_device: nonzero selects read DMA (chip reads host memory via RDMAC);
 *             zero selects write DMA (chip writes host memory via WDMAC)
 *
 * Returns 0 once the matching completion FIFO reports the descriptor, or
 * -ENODEV if it has not completed after 40 polls of 100us (~4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	/* Place the descriptor at the start of the NIC SRAM DMA pool. */
	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA engine status before queueing. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal descriptor: split 64-bit bus address into
	 * hi/lo halves and point it at an on-chip mbuf region.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Read DMA: completion queue 13, submission queue 2. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Write DMA: completion queue 16, submission queue 7. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * indirect PCI memory window (base-address + data registers).
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the matching completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15602
David S. Millerded73402005-05-23 13:59:47 -070015603#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070015604
/* Host bridges known to expose the 5700/5701 write-DMA bug even when the
 * DMA loopback test in tg3_test_dma() passes.  If one of these devices is
 * present, the driver forces the 16-byte write boundary workaround anyway.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15609
/* Configure the DMA read/write control register (tp->dma_rwctrl) for the
 * detected chip/bus combination and, on 5700/5701, run a real DMA
 * write-then-read loopback test to detect the write DMA boundary bug.
 *
 * On failure to verify the buffer at the relaxed boundary, the write
 * boundary is tightened to 16 bytes and the test retried; a second
 * corruption at the 16-byte boundary is fatal (-ENODEV).
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or a negative error from the DMA test.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes... */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	/* ...plus the DMA boundary bits chosen for this chip. */
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ chips need no further tuning and no DMA test. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: watermarks differ by ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode: per-chip watermark and workaround settings. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: low nibble must stay clear. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback verification below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (p[i] == i). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: tighten the write
				 * boundary to 16 bytes and retry the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				/* Corruption even at the safest boundary. */
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15799
Linus Torvalds1da177e2005-04-16 15:20:36 -070015800static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15801{
Joe Perches63c3a662011-04-26 08:12:10 +000015802 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlson666bc832010-01-20 16:58:03 +000015803 tp->bufmgr_config.mbuf_read_dma_low_water =
15804 DEFAULT_MB_RDMA_LOW_WATER_5705;
15805 tp->bufmgr_config.mbuf_mac_rx_low_water =
15806 DEFAULT_MB_MACRX_LOW_WATER_57765;
15807 tp->bufmgr_config.mbuf_high_water =
15808 DEFAULT_MB_HIGH_WATER_57765;
15809
15810 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15811 DEFAULT_MB_RDMA_LOW_WATER_5705;
15812 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15813 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15814 tp->bufmgr_config.mbuf_high_water_jumbo =
15815 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
Joe Perches63c3a662011-04-26 08:12:10 +000015816 } else if (tg3_flag(tp, 5705_PLUS)) {
Michael Chanfdfec1722005-07-25 12:31:48 -070015817 tp->bufmgr_config.mbuf_read_dma_low_water =
15818 DEFAULT_MB_RDMA_LOW_WATER_5705;
15819 tp->bufmgr_config.mbuf_mac_rx_low_water =
15820 DEFAULT_MB_MACRX_LOW_WATER_5705;
15821 tp->bufmgr_config.mbuf_high_water =
15822 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070015823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15824 tp->bufmgr_config.mbuf_mac_rx_low_water =
15825 DEFAULT_MB_MACRX_LOW_WATER_5906;
15826 tp->bufmgr_config.mbuf_high_water =
15827 DEFAULT_MB_HIGH_WATER_5906;
15828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015829
Michael Chanfdfec1722005-07-25 12:31:48 -070015830 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15831 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15832 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15833 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15834 tp->bufmgr_config.mbuf_high_water_jumbo =
15835 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15836 } else {
15837 tp->bufmgr_config.mbuf_read_dma_low_water =
15838 DEFAULT_MB_RDMA_LOW_WATER;
15839 tp->bufmgr_config.mbuf_mac_rx_low_water =
15840 DEFAULT_MB_MACRX_LOW_WATER;
15841 tp->bufmgr_config.mbuf_high_water =
15842 DEFAULT_MB_HIGH_WATER;
15843
15844 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15845 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15846 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15847 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15848 tp->bufmgr_config.mbuf_high_water_jumbo =
15849 DEFAULT_MB_HIGH_WATER_JUMBO;
15850 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015851
15852 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15853 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15854}
15855
15856static char * __devinit tg3_phy_string(struct tg3 *tp)
15857{
Matt Carlson79eb6902010-02-17 15:17:03 +000015858 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15859 case TG3_PHY_ID_BCM5400: return "5400";
15860 case TG3_PHY_ID_BCM5401: return "5401";
15861 case TG3_PHY_ID_BCM5411: return "5411";
15862 case TG3_PHY_ID_BCM5701: return "5701";
15863 case TG3_PHY_ID_BCM5703: return "5703";
15864 case TG3_PHY_ID_BCM5704: return "5704";
15865 case TG3_PHY_ID_BCM5705: return "5705";
15866 case TG3_PHY_ID_BCM5750: return "5750";
15867 case TG3_PHY_ID_BCM5752: return "5752";
15868 case TG3_PHY_ID_BCM5714: return "5714";
15869 case TG3_PHY_ID_BCM5780: return "5780";
15870 case TG3_PHY_ID_BCM5755: return "5755";
15871 case TG3_PHY_ID_BCM5787: return "5787";
15872 case TG3_PHY_ID_BCM5784: return "5784";
15873 case TG3_PHY_ID_BCM5756: return "5722/5756";
15874 case TG3_PHY_ID_BCM5906: return "5906";
15875 case TG3_PHY_ID_BCM5761: return "5761";
15876 case TG3_PHY_ID_BCM5718C: return "5718C";
15877 case TG3_PHY_ID_BCM5718S: return "5718S";
15878 case TG3_PHY_ID_BCM57765: return "57765";
Matt Carlson302b5002010-06-05 17:24:38 +000015879 case TG3_PHY_ID_BCM5719C: return "5719C";
Matt Carlson6418f2c2011-04-05 14:22:49 +000015880 case TG3_PHY_ID_BCM5720C: return "5720C";
Matt Carlson79eb6902010-02-17 15:17:03 +000015881 case TG3_PHY_ID_BCM8002: return "8002/serdes";
Linus Torvalds1da177e2005-04-16 15:20:36 -070015882 case 0: return "serdes";
15883 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070015884 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015885}
15886
Michael Chanf9804dd2005-09-27 12:13:10 -070015887static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15888{
Joe Perches63c3a662011-04-26 08:12:10 +000015889 if (tg3_flag(tp, PCI_EXPRESS)) {
Michael Chanf9804dd2005-09-27 12:13:10 -070015890 strcpy(str, "PCI Express");
15891 return str;
Joe Perches63c3a662011-04-26 08:12:10 +000015892 } else if (tg3_flag(tp, PCIX_MODE)) {
Michael Chanf9804dd2005-09-27 12:13:10 -070015893 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15894
15895 strcpy(str, "PCIX:");
15896
15897 if ((clock_ctrl == 7) ||
15898 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15899 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15900 strcat(str, "133MHz");
15901 else if (clock_ctrl == 0)
15902 strcat(str, "33MHz");
15903 else if (clock_ctrl == 2)
15904 strcat(str, "50MHz");
15905 else if (clock_ctrl == 4)
15906 strcat(str, "66MHz");
15907 else if (clock_ctrl == 6)
15908 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070015909 } else {
15910 strcpy(str, "PCI:");
Joe Perches63c3a662011-04-26 08:12:10 +000015911 if (tg3_flag(tp, PCI_HIGH_SPEED))
Michael Chanf9804dd2005-09-27 12:13:10 -070015912 strcat(str, "66MHz");
15913 else
15914 strcat(str, "33MHz");
15915 }
Joe Perches63c3a662011-04-26 08:12:10 +000015916 if (tg3_flag(tp, PCI_32BIT))
Michael Chanf9804dd2005-09-27 12:13:10 -070015917 strcat(str, ":32-bit");
15918 else
15919 strcat(str, ":64-bit");
15920 return str;
15921}
15922
David S. Miller15f98502005-05-18 22:49:26 -070015923static void __devinit tg3_init_coal(struct tg3 *tp)
15924{
15925 struct ethtool_coalesce *ec = &tp->coal;
15926
15927 memset(ec, 0, sizeof(*ec));
15928 ec->cmd = ETHTOOL_GCOALESCE;
15929 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15930 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15931 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15932 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15933 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15934 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15935 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15936 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15937 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15938
15939 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15940 HOSTCC_MODE_CLRTICK_TXBD)) {
15941 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15942 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15943 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15944 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15945 }
Michael Chand244c892005-07-05 14:42:33 -070015946
Joe Perches63c3a662011-04-26 08:12:10 +000015947 if (tg3_flag(tp, 5705_PLUS)) {
Michael Chand244c892005-07-05 14:42:33 -070015948 ec->rx_coalesce_usecs_irq = 0;
15949 ec->tx_coalesce_usecs_irq = 0;
15950 ec->stats_block_coalesce_usecs = 0;
15951 }
David S. Miller15f98502005-05-18 22:49:26 -070015952}
15953
Linus Torvalds1da177e2005-04-16 15:20:36 -070015954static int __devinit tg3_init_one(struct pci_dev *pdev,
15955 const struct pci_device_id *ent)
15956{
Linus Torvalds1da177e2005-04-16 15:20:36 -070015957 struct net_device *dev;
15958 struct tg3 *tp;
Matt Carlson646c9ed2009-09-01 12:58:41 +000015959 int i, err, pm_cap;
15960 u32 sndmbx, rcvmbx, intmbx;
Michael Chanf9804dd2005-09-27 12:13:10 -070015961 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080015962 u64 dma_mask, persist_dma_mask;
Michał Mirosławc8f44af2011-11-15 15:29:55 +000015963 netdev_features_t features = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015964
Joe Perches05dbe002010-02-17 19:44:19 +000015965 printk_once(KERN_INFO "%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015966
15967 err = pci_enable_device(pdev);
15968 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000015969 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070015970 return err;
15971 }
15972
Linus Torvalds1da177e2005-04-16 15:20:36 -070015973 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15974 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000015975 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070015976 goto err_out_disable_pdev;
15977 }
15978
15979 pci_set_master(pdev);
15980
15981 /* Find power-management capability. */
15982 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15983 if (pm_cap == 0) {
Matt Carlson2445e462010-04-05 10:19:21 +000015984 dev_err(&pdev->dev,
15985 "Cannot find Power Management capability, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070015986 err = -EIO;
15987 goto err_out_free_res;
15988 }
15989
Matt Carlson16821282011-07-13 09:27:28 +000015990 err = pci_set_power_state(pdev, PCI_D0);
15991 if (err) {
15992 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15993 goto err_out_free_res;
15994 }
15995
Matt Carlsonfe5f5782009-09-01 13:09:39 +000015996 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015997 if (!dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070015998 err = -ENOMEM;
Matt Carlson16821282011-07-13 09:27:28 +000015999 goto err_out_power_down;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016000 }
16001
Linus Torvalds1da177e2005-04-16 15:20:36 -070016002 SET_NETDEV_DEV(dev, &pdev->dev);
16003
Linus Torvalds1da177e2005-04-16 15:20:36 -070016004 tp = netdev_priv(dev);
16005 tp->pdev = pdev;
16006 tp->dev = dev;
16007 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016008 tp->rx_mode = TG3_DEF_RX_MODE;
16009 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070016010
Linus Torvalds1da177e2005-04-16 15:20:36 -070016011 if (tg3_debug > 0)
16012 tp->msg_enable = tg3_debug;
16013 else
16014 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16015
16016 /* The word/byte swap controls here control register access byte
16017 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16018 * setting below.
16019 */
16020 tp->misc_host_ctrl =
16021 MISC_HOST_CTRL_MASK_PCI_INT |
16022 MISC_HOST_CTRL_WORD_SWAP |
16023 MISC_HOST_CTRL_INDIR_ACCESS |
16024 MISC_HOST_CTRL_PCISTATE_RW;
16025
16026 /* The NONFRM (non-frame) byte/word swap controls take effect
16027 * on descriptor entries, anything which isn't packet data.
16028 *
16029 * The StrongARM chips on the board (one for tx, one for rx)
16030 * are running in big-endian mode.
16031 */
16032 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16033 GRC_MODE_WSWAP_NONFRM_DATA);
16034#ifdef __BIG_ENDIAN
16035 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16036#endif
16037 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016038 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000016039 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016040
Matt Carlsond5fe4882008-11-21 17:20:32 -080016041 tp->regs = pci_ioremap_bar(pdev, BAR_0);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010016042 if (!tp->regs) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016043 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016044 err = -ENOMEM;
16045 goto err_out_free_dev;
16046 }
16047
Matt Carlsonc9cab242011-07-13 09:27:27 +000016048 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16049 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
Michael Chan79d49692012-11-05 14:26:29 +000016053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
Matt Carlsonc9cab242011-07-13 09:27:27 +000016054 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16055 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16056 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16057 tg3_flag_set(tp, ENABLE_APE);
16058 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16059 if (!tp->aperegs) {
16060 dev_err(&pdev->dev,
16061 "Cannot map APE registers, aborting\n");
16062 err = -ENOMEM;
16063 goto err_out_iounmap;
16064 }
16065 }
16066
Linus Torvalds1da177e2005-04-16 15:20:36 -070016067 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16068 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016069
Linus Torvalds1da177e2005-04-16 15:20:36 -070016070 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016071 dev->watchdog_timeo = TG3_TX_TIMEOUT;
Matt Carlson2ffcc982011-05-19 12:12:44 +000016072 dev->netdev_ops = &tg3_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016073 dev->irq = pdev->irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016074
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000016075 err = tg3_get_invariants(tp, ent);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016076 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016077 dev_err(&pdev->dev,
16078 "Problem fetching invariants of chip, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016079 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016080 }
16081
Michael Chan4a29cc22006-03-19 13:21:12 -080016082 /* The EPB bridge inside 5714, 5715, and 5780 and any
16083 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080016084 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16085 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16086 * do DMA address check in tg3_start_xmit().
16087 */
Joe Perches63c3a662011-04-26 08:12:10 +000016088 if (tg3_flag(tp, IS_5788))
Yang Hongyang284901a2009-04-06 19:01:15 -070016089 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
Joe Perches63c3a662011-04-26 08:12:10 +000016090 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
Yang Hongyang50cf1562009-04-06 19:01:14 -070016091 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
Michael Chan72f2afb2006-03-06 19:28:35 -080016092#ifdef CONFIG_HIGHMEM
Yang Hongyang6a355282009-04-06 19:01:13 -070016093 dma_mask = DMA_BIT_MASK(64);
Michael Chan72f2afb2006-03-06 19:28:35 -080016094#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080016095 } else
Yang Hongyang6a355282009-04-06 19:01:13 -070016096 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
Michael Chan72f2afb2006-03-06 19:28:35 -080016097
16098 /* Configure DMA attributes. */
Yang Hongyang284901a2009-04-06 19:01:15 -070016099 if (dma_mask > DMA_BIT_MASK(32)) {
Michael Chan72f2afb2006-03-06 19:28:35 -080016100 err = pci_set_dma_mask(pdev, dma_mask);
16101 if (!err) {
Matt Carlson0da06062011-05-19 12:12:53 +000016102 features |= NETIF_F_HIGHDMA;
Michael Chan72f2afb2006-03-06 19:28:35 -080016103 err = pci_set_consistent_dma_mask(pdev,
16104 persist_dma_mask);
16105 if (err < 0) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016106 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16107 "DMA for consistent allocations\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016108 goto err_out_apeunmap;
Michael Chan72f2afb2006-03-06 19:28:35 -080016109 }
16110 }
16111 }
Yang Hongyang284901a2009-04-06 19:01:15 -070016112 if (err || dma_mask == DMA_BIT_MASK(32)) {
16113 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Michael Chan72f2afb2006-03-06 19:28:35 -080016114 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016115 dev_err(&pdev->dev,
16116 "No usable DMA configuration, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016117 goto err_out_apeunmap;
Michael Chan72f2afb2006-03-06 19:28:35 -080016118 }
16119 }
16120
Michael Chanfdfec1722005-07-25 12:31:48 -070016121 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016122
Matt Carlson0da06062011-05-19 12:12:53 +000016123 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16124
16125 /* 5700 B0 chips do not support checksumming correctly due
16126 * to hardware bugs.
16127 */
16128 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16129 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16130
16131 if (tg3_flag(tp, 5755_PLUS))
16132 features |= NETIF_F_IPV6_CSUM;
16133 }
16134
Michael Chan4e3a7aa2006-03-20 17:47:44 -080016135 /* TSO is on by default on chips that support hardware TSO.
16136 * Firmware TSO on older chips gives lower performance, so it
16137 * is off by default, but can be enabled using ethtool.
16138 */
Joe Perches63c3a662011-04-26 08:12:10 +000016139 if ((tg3_flag(tp, HW_TSO_1) ||
16140 tg3_flag(tp, HW_TSO_2) ||
16141 tg3_flag(tp, HW_TSO_3)) &&
Matt Carlson0da06062011-05-19 12:12:53 +000016142 (features & NETIF_F_IP_CSUM))
16143 features |= NETIF_F_TSO;
Joe Perches63c3a662011-04-26 08:12:10 +000016144 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
Matt Carlson0da06062011-05-19 12:12:53 +000016145 if (features & NETIF_F_IPV6_CSUM)
16146 features |= NETIF_F_TSO6;
Joe Perches63c3a662011-04-26 08:12:10 +000016147 if (tg3_flag(tp, HW_TSO_3) ||
Matt Carlsone849cdc2009-11-13 13:03:38 +000016148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070016149 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16150 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
Joe Perches63c3a662011-04-26 08:12:10 +000016151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michał Mirosławdc668912011-04-07 03:35:07 +000016152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
Matt Carlson0da06062011-05-19 12:12:53 +000016153 features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070016154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016155
Matt Carlsond542fe22011-05-19 16:02:43 +000016156 dev->features |= features;
16157 dev->vlan_features |= features;
16158
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016159 /*
16160 * Add loopback capability only for a subset of devices that support
16161 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16162 * loopback for the remaining devices.
16163 */
16164 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16165 !tg3_flag(tp, CPMU_PRESENT))
16166 /* Add the loopback capability */
Matt Carlson0da06062011-05-19 12:12:53 +000016167 features |= NETIF_F_LOOPBACK;
16168
Matt Carlson0da06062011-05-19 12:12:53 +000016169 dev->hw_features |= features;
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016170
Linus Torvalds1da177e2005-04-16 15:20:36 -070016171 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
Joe Perches63c3a662011-04-26 08:12:10 +000016172 !tg3_flag(tp, TSO_CAPABLE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070016173 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
Joe Perches63c3a662011-04-26 08:12:10 +000016174 tg3_flag_set(tp, MAX_RXPEND_64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016175 tp->rx_pending = 63;
16176 }
16177
Linus Torvalds1da177e2005-04-16 15:20:36 -070016178 err = tg3_get_device_address(tp);
16179 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016180 dev_err(&pdev->dev,
16181 "Could not obtain valid ethernet address, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016182 goto err_out_apeunmap;
Matt Carlson0d3031d2007-10-10 18:02:43 -070016183 }
16184
Matt Carlsonc88864d2007-11-12 21:07:01 -080016185 /*
16186 * Reset chip in case UNDI or EFI driver did not shutdown
16187 * DMA self test will enable WDMAC and we'll see (spurious)
16188 * pending DMA on the PCI bus at that point.
16189 */
16190 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16191 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16192 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16194 }
16195
16196 err = tg3_test_dma(tp);
16197 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016198 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
Matt Carlsonc88864d2007-11-12 21:07:01 -080016199 goto err_out_apeunmap;
16200 }
16201
Matt Carlson78f90dc2009-11-13 13:03:42 +000016202 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16203 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16204 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
Matt Carlson6fd45cb2010-09-15 08:59:57 +000016205 for (i = 0; i < tp->irq_max; i++) {
Matt Carlson78f90dc2009-11-13 13:03:42 +000016206 struct tg3_napi *tnapi = &tp->napi[i];
16207
16208 tnapi->tp = tp;
16209 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16210
16211 tnapi->int_mbox = intmbx;
Matt Carlson93a700a2011-08-31 11:44:54 +000016212 if (i <= 4)
Matt Carlson78f90dc2009-11-13 13:03:42 +000016213 intmbx += 0x8;
16214 else
16215 intmbx += 0x4;
16216
16217 tnapi->consmbox = rcvmbx;
16218 tnapi->prodmbox = sndmbx;
16219
Matt Carlson66cfd1b2010-09-30 10:34:30 +000016220 if (i)
Matt Carlson78f90dc2009-11-13 13:03:42 +000016221 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
Matt Carlson66cfd1b2010-09-30 10:34:30 +000016222 else
Matt Carlson78f90dc2009-11-13 13:03:42 +000016223 tnapi->coal_now = HOSTCC_MODE_NOW;
Matt Carlson78f90dc2009-11-13 13:03:42 +000016224
Joe Perches63c3a662011-04-26 08:12:10 +000016225 if (!tg3_flag(tp, SUPPORT_MSIX))
Matt Carlson78f90dc2009-11-13 13:03:42 +000016226 break;
16227
16228 /*
16229 * If we support MSIX, we'll be using RSS. If we're using
16230 * RSS, the first vector only handles link interrupts and the
16231 * remaining vectors handle rx and tx interrupts. Reuse the
16232 * mailbox values for the next iteration. The values we setup
16233 * above are still useful for the single vectored mode.
16234 */
16235 if (!i)
16236 continue;
16237
16238 rcvmbx += 0x8;
16239
16240 if (sndmbx & 0x4)
16241 sndmbx -= 0x4;
16242 else
16243 sndmbx += 0xc;
16244 }
16245
Matt Carlsonc88864d2007-11-12 21:07:01 -080016246 tg3_init_coal(tp);
16247
Michael Chanc49a1562006-12-17 17:07:29 -080016248 pci_set_drvdata(pdev, dev);
16249
Matt Carlsoncd0d7222011-07-13 09:27:33 +000016250 if (tg3_flag(tp, 5717_PLUS)) {
16251 /* Resume a low-power mode */
16252 tg3_frob_aux_power(tp, false);
16253 }
16254
Matt Carlson21f76382012-02-22 12:35:21 +000016255 tg3_timer_init(tp);
16256
Linus Torvalds1da177e2005-04-16 15:20:36 -070016257 err = register_netdev(dev);
16258 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016259 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070016260 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016261 }
16262
Joe Perches05dbe002010-02-17 19:44:19 +000016263 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16264 tp->board_part_number,
16265 tp->pci_chip_rev_id,
16266 tg3_bus_string(tp, str),
16267 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016268
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016269 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +000016270 struct phy_device *phydev;
16271 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Matt Carlson5129c3a2010-04-05 10:19:23 +000016272 netdev_info(dev,
16273 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
Joe Perches05dbe002010-02-17 19:44:19 +000016274 phydev->drv->name, dev_name(&phydev->dev));
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016275 } else {
16276 char *ethtype;
16277
16278 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16279 ethtype = "10/100Base-TX";
16280 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16281 ethtype = "1000Base-SX";
16282 else
16283 ethtype = "10/100/1000Base-T";
16284
Matt Carlson5129c3a2010-04-05 10:19:23 +000016285 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
Matt Carlson47007832011-04-20 07:57:43 +000016286 "(WireSpeed[%d], EEE[%d])\n",
16287 tg3_phy_string(tp), ethtype,
16288 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16289 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016290 }
Matt Carlsondf59c942008-11-03 16:52:56 -080016291
Joe Perches05dbe002010-02-17 19:44:19 +000016292 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Michał Mirosławdc668912011-04-07 03:35:07 +000016293 (dev->features & NETIF_F_RXCSUM) != 0,
Joe Perches63c3a662011-04-26 08:12:10 +000016294 tg3_flag(tp, USE_LINKCHG_REG) != 0,
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016295 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
Joe Perches63c3a662011-04-26 08:12:10 +000016296 tg3_flag(tp, ENABLE_ASF) != 0,
16297 tg3_flag(tp, TSO_CAPABLE) != 0);
Joe Perches05dbe002010-02-17 19:44:19 +000016298 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16299 tp->dma_rwctrl,
16300 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16301 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016302
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016303 pci_save_state(pdev);
16304
Linus Torvalds1da177e2005-04-16 15:20:36 -070016305 return 0;
16306
Matt Carlson0d3031d2007-10-10 18:02:43 -070016307err_out_apeunmap:
16308 if (tp->aperegs) {
16309 iounmap(tp->aperegs);
16310 tp->aperegs = NULL;
16311 }
16312
Linus Torvalds1da177e2005-04-16 15:20:36 -070016313err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070016314 if (tp->regs) {
16315 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070016316 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070016317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016318
16319err_out_free_dev:
16320 free_netdev(dev);
16321
Matt Carlson16821282011-07-13 09:27:28 +000016322err_out_power_down:
16323 pci_set_power_state(pdev, PCI_D3hot);
16324
Linus Torvalds1da177e2005-04-16 15:20:36 -070016325err_out_free_res:
16326 pci_release_regions(pdev);
16327
16328err_out_disable_pdev:
16329 pci_disable_device(pdev);
16330 pci_set_drvdata(pdev, NULL);
16331 return err;
16332}
16333
/* Undo everything tg3_init_one() set up for this device.
 * Called by the PCI core on hot-unplug or driver unload.
 * The teardown order matters: the deferred reset task must be cancelled
 * before resources it could touch are released, and the netdev must be
 * unregistered before the register windows are unmapped.
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                /* release_firmware() accepts NULL, so this is safe even if
                 * no firmware blob was ever loaded. */
                release_firmware(tp->fw);

                /* Ensure the reset worker is not queued or running. */
                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                /* Unmap the APE register window, if this chip has one. */
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}
16365
Eric Dumazetaa6027c2011-01-01 05:22:46 +000016366#ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce the device ahead of system suspend.
 *
 * Stops the data path and the driver timer, disables interrupts,
 * detaches the net device, halts the chip and prepares it for low
 * power.  If the power-down preparation fails, the hardware is
 * restarted so the interface stays usable.
 *
 * Returns 0 on success or a negative errno from the power-down step.
 */
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Nothing to quiesce if the interface is down. */
        if (!netif_running(dev))
                return 0;

        /* Make sure the deferred reset task cannot run during teardown. */
        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        /* Hide the device from the networking core while suspended. */
        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                /* Power-down failed: bring the hardware back up so the
                 * interface keeps working, then report the original error. */
                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                /* Restart the PHY only if the hardware came back cleanly. */
                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}
16419
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016420static int tg3_resume(struct device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016421{
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016422 struct pci_dev *pdev = to_pci_dev(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016423 struct net_device *dev = pci_get_drvdata(pdev);
16424 struct tg3 *tp = netdev_priv(dev);
16425 int err;
16426
16427 if (!netif_running(dev))
16428 return 0;
16429
Linus Torvalds1da177e2005-04-16 15:20:36 -070016430 netif_device_attach(dev);
16431
David S. Millerf47c11e2005-06-24 20:18:35 -070016432 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016433
Joe Perches63c3a662011-04-26 08:12:10 +000016434 tg3_flag_set(tp, INIT_COMPLETE);
Michael Chanb9ec6c12006-07-25 16:37:27 -070016435 err = tg3_restart_hw(tp, 1);
16436 if (err)
16437 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016438
Matt Carlson21f76382012-02-22 12:35:21 +000016439 tg3_timer_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016440
Linus Torvalds1da177e2005-04-16 15:20:36 -070016441 tg3_netif_start(tp);
16442
Michael Chanb9ec6c12006-07-25 16:37:27 -070016443out:
David S. Millerf47c11e2005-06-24 20:18:35 -070016444 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016445
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070016446 if (!err)
16447 tg3_phy_start(tp);
16448
Michael Chanb9ec6c12006-07-25 16:37:27 -070016449 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016450}
16451
/* Wire the suspend/resume handlers into a standard dev_pm_ops table
 * when sleep support is configured in the kernel. */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

/* Without CONFIG_PM_SLEEP the driver registers no PM callbacks. */
#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000016460
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016461/**
16462 * tg3_io_error_detected - called when PCI error is detected
16463 * @pdev: Pointer to PCI device
16464 * @state: The current pci connection state
16465 *
16466 * This function is called after a PCI bus error affecting
16467 * this device has been detected.
16468 */
16469static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16470 pci_channel_state_t state)
16471{
16472 struct net_device *netdev = pci_get_drvdata(pdev);
16473 struct tg3 *tp = netdev_priv(netdev);
16474 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16475
16476 netdev_info(netdev, "PCI I/O error detected\n");
16477
16478 rtnl_lock();
16479
16480 if (!netif_running(netdev))
16481 goto done;
16482
16483 tg3_phy_stop(tp);
16484
16485 tg3_netif_stop(tp);
16486
Matt Carlson21f76382012-02-22 12:35:21 +000016487 tg3_timer_stop(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016488
16489 /* Want to make sure that the reset task doesn't run */
Matt Carlsondb219972011-11-04 09:15:03 +000016490 tg3_reset_task_cancel(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016491
16492 netif_device_detach(netdev);
16493
16494 /* Clean up software state, even if MMIO is blocked */
16495 tg3_full_lock(tp, 0);
16496 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16497 tg3_full_unlock(tp);
16498
16499done:
16500 if (state == pci_channel_io_perm_failure)
16501 err = PCI_ERS_RESULT_DISCONNECT;
16502 else
16503 pci_disable_device(pdev);
16504
16505 rtnl_unlock();
16506
16507 return err;
16508}
16509
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has exprienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 *
 * Returns %PCI_ERS_RESULT_RECOVERED on success, otherwise
 * %PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        /* Restore the pre-error config space, then re-save it so a later
         * pci_restore_state() starts from this known-good state. */
        pci_restore_state(pdev);
        pci_save_state(pdev);

        /* Interface was down: re-enabling the PCI device is enough. */
        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}
16553
16554/**
16555 * tg3_io_resume - called when traffic can start flowing again.
16556 * @pdev: Pointer to PCI device
16557 *
16558 * This callback is called when the error recovery driver tells
16559 * us that its OK to resume normal operation.
16560 */
16561static void tg3_io_resume(struct pci_dev *pdev)
16562{
16563 struct net_device *netdev = pci_get_drvdata(pdev);
16564 struct tg3 *tp = netdev_priv(netdev);
16565 int err;
16566
16567 rtnl_lock();
16568
16569 if (!netif_running(netdev))
16570 goto done;
16571
16572 tg3_full_lock(tp, 0);
Joe Perches63c3a662011-04-26 08:12:10 +000016573 tg3_flag_set(tp, INIT_COMPLETE);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016574 err = tg3_restart_hw(tp, 1);
16575 tg3_full_unlock(tp);
16576 if (err) {
16577 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16578 goto done;
16579 }
16580
16581 netif_device_attach(netdev);
16582
Matt Carlson21f76382012-02-22 12:35:21 +000016583 tg3_timer_start(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000016584
16585 tg3_netif_start(tp);
16586
16587 tg3_phy_start(tp);
16588
16589done:
16590 rtnl_unlock();
16591}
16592
/* PCI Advanced Error Reporting (AER) recovery callbacks. */
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};
16598
/* PCI driver glue: probe/remove entry points, supported device IDs,
 * AER error handlers and power-management callbacks. */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};
16607
16608static int __init tg3_init(void)
16609{
Jeff Garzik29917622006-08-19 17:48:59 -040016610 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016611}
16612
/* Module exit point: detach the driver from the PCI core. */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);