blob: 2b2bee61ddd75864083ddd72710b0ab7932a2492 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
Nithin Nayak Sujirb681b652013-01-06 12:51:10 +00007 * Copyright (C) 2005-2013 Broadcom Corporation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19#include <linux/module.h>
20#include <linux/moduleparam.h>
Matt Carlson6867c842010-07-11 09:31:44 +000021#include <linux/stringify.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020027#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/init.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000029#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/ethtool.h>
Matt Carlson3110f5f52010-12-06 08:28:50 +000036#include <linux/mdio.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070038#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070039#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070044#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020045#include <linux/dma-mapping.h>
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -080046#include <linux/firmware.h>
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +000047#include <linux/ssb/ssb_driver_gige.h>
Michael Chanaed93e02012-07-16 16:24:02 +000048#include <linux/hwmon.h>
49#include <linux/hwmon-sysfs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050
51#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030052#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Javier Martinez Canillas27fd9de2011-03-26 16:42:31 +000054#include <linux/io.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#include <asm/byteorder.h>
Javier Martinez Canillas27fd9de2011-03-26 16:42:31 +000056#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070057
Matt Carlsonbe947302012-12-03 19:36:57 +000058#include <uapi/linux/net_tstamp.h>
59#include <linux/ptp_clock_kernel.h>
60
David S. Miller49b6e95f2007-03-29 01:38:42 -070061#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070062#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070063#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070064#endif
65
Matt Carlson63532392008-11-03 16:49:57 -080066#define BAR_0 0
67#define BAR_2 2
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069#include "tg3.h"
70
Joe Perches63c3a662011-04-26 08:12:10 +000071/* Functions & macros to verify TG3_FLAGS types */
72
/* Return nonzero if @flag is set in the device flag bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
77
/* Set @flag in the device flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
82
/* Clear @flag in the device flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
87
88#define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90#define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92#define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#define DRV_MODULE_NAME "tg3"
Matt Carlson6867c842010-07-11 09:31:44 +000096#define TG3_MAJ_NUM 3
Michael Chand8871992013-02-14 12:13:42 +000097#define TG3_MIN_NUM 130
Matt Carlson6867c842010-07-11 09:31:44 +000098#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
Michael Chand8871992013-02-14 12:13:42 +0000100#define DRV_MODULE_RELDATE "February 14, 2013"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101
Matt Carlsonfd6d3f02011-08-31 11:44:52 +0000102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1
104#define RESET_KIND_SUSPEND 2
105
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106#define TG3_DEF_RX_MODE 0
107#define TG3_DEF_TX_MODE 0
108#define TG3_DEF_MSG_ENABLE \
109 (NETIF_MSG_DRV | \
110 NETIF_MSG_PROBE | \
111 NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | \
113 NETIF_MSG_IFDOWN | \
114 NETIF_MSG_IFUP | \
115 NETIF_MSG_RX_ERR | \
116 NETIF_MSG_TX_ERR)
117
Matt Carlson520b2752011-06-13 13:39:02 +0000118#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120/* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
122 */
Joe Perches63c3a662011-04-26 08:12:10 +0000123
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124#define TG3_TX_TIMEOUT (5 * HZ)
125
126/* hardware minimum and maximum for a single frame's data payload */
127#define TG3_MIN_MTU 60
128#define TG3_MAX_MTU(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
131/* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
134 */
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000135#define TG3_RX_STD_RING_SIZE(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
Matt Carlsonde9f5232011-04-05 14:22:43 +0000137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138#define TG3_DEF_RX_RING_PENDING 200
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000139#define TG3_RX_JMB_RING_SIZE(tp) \
Joe Perches63c3a662011-04-26 08:12:10 +0000140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
Matt Carlsonde9f5232011-04-05 14:22:43 +0000141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142#define TG3_DEF_RX_JUMBO_RING_PENDING 100
143
144/* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
149 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150
151#define TG3_TX_RING_SIZE 512
152#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153
Matt Carlson2c49a442010-09-30 10:34:35 +0000154#define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156#define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158#define TG3_RX_RCB_RING_BYTES(tp) \
Matt Carlson7cb32cf2010-09-30 10:34:36 +0000159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
Matt Carlson287be122009-08-28 13:58:46 +0000164#define TG3_DMA_BYTE_ENAB 64
165
166#define TG3_RX_STD_DMA_SZ 1536
167#define TG3_RX_JMB_DMA_SZ 9046
168
169#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170
171#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173
Matt Carlson2c49a442010-09-30 10:34:35 +0000174#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
Matt Carlson2b2cdb62009-11-13 13:03:48 +0000176
Matt Carlson2c49a442010-09-30 10:34:35 +0000177#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
Matt Carlson2b2cdb62009-11-13 13:03:48 +0000179
Matt Carlsond2757fc2010-04-12 06:58:27 +0000180/* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191#define TG3_RX_COPY_THRESHOLD 256
192#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194#else
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196#endif
197
Matt Carlson81389f52011-08-31 11:44:49 +0000198#if (NET_IP_ALIGN != 0)
199#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200#else
Eric Dumazet9205fd92011-11-18 06:47:01 +0000201#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
Matt Carlson81389f52011-08-31 11:44:49 +0000202#endif
203
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204/* minimum number of free TX descriptors required to wake up TX process */
Matt Carlsonf3f3f272009-08-28 14:03:21 +0000205#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
Matt Carlson55086ad2011-12-14 11:09:59 +0000206#define TG3_TX_BD_DMA_MAX_2K 2048
Matt Carlsona4cb4282011-12-14 11:09:58 +0000207#define TG3_TX_BD_DMA_MAX_4K 4096
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Matt Carlsonad829262008-11-21 17:16:16 -0800209#define TG3_RAW_IP_ALIGN 2
210
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000211#define TG3_FW_UPDATE_TIMEOUT_SEC 5
Matt Carlson21f76382012-02-22 12:35:21 +0000212#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
Matt Carlsonc6cdf432010-04-05 10:19:26 +0000213
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800214#define FIRMWARE_TG3 "tigon/tg3.bin"
Nithin Sujirc4dab502013-03-06 17:02:34 +0000215#define FIRMWARE_TG357766 "tigon/tg357766.bin"
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800216#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
218
/* Driver banner string, e.g. "tg3.c:v3.130 (February 14, 2013)". */
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221
222MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224MODULE_LICENSE("GPL");
225MODULE_VERSION(DRV_MODULE_VERSION);
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -0800226MODULE_FIRMWARE(FIRMWARE_TG3);
227MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231module_param(tg3_debug, int, 0);
232MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000234#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
236
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000237static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
Michael Chan126a3362006-09-27 16:03:07 -0700267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +0000268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
Michael Chan126a3362006-09-27 16:03:07 -0700283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
Michael Chanb5d37722006-09-27 16:06:21 -0700300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
Matt Carlsond30cdd22007-10-07 23:28:35 -0700302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
Matt Carlson6c7af272007-10-21 16:12:02 -0700304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
Matt Carlson9936bcf2007-10-10 18:03:07 -0700305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
Matt Carlsonc88e6682008-11-03 16:49:18 -0800307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
Matt Carlson2befdce2009-08-28 12:28:45 +0000309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson321d32a2008-11-21 17:22:19 -0800317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson5e7ccf22009-08-25 10:08:42 +0000321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
Matt Carlson5001e2f2009-11-13 13:03:51 +0000322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
Michael Chan79d49692012-11-05 14:26:29 +0000323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
Matt Carlson5001e2f2009-11-13 13:03:51 +0000324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
Matt Carlsonb0f75222010-01-20 16:58:11 +0000325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +0000329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
Matt Carlson302b5002010-06-05 17:24:38 +0000333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
Matt Carlsonba1f3c72011-04-05 14:22:50 +0000334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
Greg KH02eca3f2012-07-12 15:39:44 +0000335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
Matt Carlsond3f677a2013-02-14 14:27:51 +0000336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
Michael Chanc86a8562013-01-06 12:51:08 +0000337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
Meelis Roos1dcb14d2011-05-25 05:43:47 +0000347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700348 {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700349};
350
351MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Statistic names reported via "ethtool -S", in the same order the driver
 * fills the corresponding counter values (blank-line groups: rx MAC stats,
 * tx MAC stats, receive-list/send-data DMA stats, host coalescing stats).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
Matt Carlson48fa55a2011-04-13 11:05:06 +0000436#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
Nithin Nayak Sujir93df8b82012-11-14 14:44:28 +0000437#define TG3_NVRAM_TEST 0
438#define TG3_LINK_TEST 1
439#define TG3_REGISTER_TEST 2
440#define TG3_MEMORY_TEST 3
441#define TG3_MAC_LOOPB_TEST 4
442#define TG3_PHY_LOOPB_TEST 5
443#define TG3_EXT_LOOPB_TEST 6
444#define TG3_INTERRUPT_TEST 7
Matt Carlson48fa55a2011-04-13 11:05:06 +0000445
446
/* Self-test names reported via "ethtool -t", indexed by the TG3_*_TEST
 * constants above.
 * NOTE(review): upstream pads these strings so "(online)"/"(offline)"
 * align; internal spacing may have been collapsed by extraction — verify
 * against the original file before relying on exact string contents.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};
459
Matt Carlson48fa55a2011-04-13 11:05:06 +0000460#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
461
462
/* Plain MMIO write of @val to register offset @off (no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
467
/* Plain MMIO read of register offset @off. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
472
/* MMIO write of @val to APE register offset @off (separate BAR, tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
477
/* MMIO read of APE register offset @off (separate BAR, tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
482
/* Write a chip register indirectly through PCI config space: the target
 * offset goes into TG3PCI_REG_BASE_ADDR, the value into TG3PCI_REG_DATA.
 * indirect_lock serializes the two-step config-space sequence against
 * other indirect accessors.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* MMIO write followed by a read-back of the same register, forcing the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
498
/* Read a chip register indirectly through PCI config space (counterpart
 * of tg3_write_indirect_reg32); indirect_lock keeps the base-address and
 * data accesses paired.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
510
/* Write a mailbox register via PCI config space.  Two mailboxes have
 * dedicated config-space shadows and are written directly without the
 * lock; everything else goes through the locked base-addr/data sequence
 * at the mailbox window (off + 0x5600).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return-ring consumer index has its own config-space register. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* So does the standard RX producer index. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
/* Read a mailbox register via PCI config space, using the mailbox window
 * (off + 0x5600) under indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
552
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is already unbuffered,
		 * no read-back needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: read back to push the write to the chip. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
576
/* Write a mailbox register and read it back when the chip configuration
 * requires the posted write to be flushed (FLUSH_POSTED_WRITES, or a part
 * that neither reorders mailbox writes nor uses the ICH workaround).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
585
/* Write a TX mailbox register.  Chips with the TXD_MBOX_HWBUG flag need
 * the value written twice; a trailing read-back flushes the posted write
 * where the configuration requires it.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* hardware bug workaround: repeat */
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
596
/* 5906-specific mailbox read: mailboxes live at GRCMBOX_BASE in MMIO space. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906-specific mailbox write: mailboxes live at GRCMBOX_BASE in MMIO space. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
/* Mailbox accessors, indirect through the per-chip function pointers
 * selected at probe time.  The _f variant flushes the posted write.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

/* Plain register accessors; _f flushes the posted write, _wait_f
 * flushes and then waits @us microseconds.
 */
#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617
/* Write @val into NIC-internal SRAM at offset @off.
 *
 * The memory window registers (TG3PCI_MEM_WIN_*) are shared state, so
 * every access is serialized with tp->indirect_lock.  Depending on
 * SRAM_USE_CONFIG the window is driven through PCI config space or
 * through the normal register file.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: writes to the stats block SRAM region are skipped. */
	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read a u32 from NIC-internal SRAM at offset @off into *@val.
 * Counterpart of tg3_write_mem(); same memory window and locking rules.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the stats block SRAM region is not read; report 0. */
	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
Matt Carlson0d3031d2007-10-10 18:02:43 -0700670static void tg3_ape_lock_init(struct tg3 *tp)
671{
672 int i;
Matt Carlson6f5c8f832011-07-13 09:27:31 +0000673 u32 regbase, bit;
Matt Carlsonf92d9dc12010-06-05 17:24:30 +0000674
Joe Perches41535772013-02-16 11:20:04 +0000675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
Matt Carlsonf92d9dc12010-06-05 17:24:30 +0000676 regbase = TG3_APE_LOCK_GRANT;
677 else
678 regbase = TG3_APE_PER_LOCK_GRANT;
Matt Carlson0d3031d2007-10-10 18:02:43 -0700679
680 /* Make sure the driver hasn't any stale locks. */
Matt Carlson78f94dc2011-11-04 09:14:58 +0000681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 switch (i) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
688 break;
689 default:
690 if (!tp->pci_fn)
691 bit = APE_LOCK_GRANT_DRIVER;
692 else
693 bit = 1 << tp->pci_fn;
694 }
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
Matt Carlson6f5c8f832011-07-13 09:27:31 +0000696 }
697
Matt Carlson0d3031d2007-10-10 18:02:43 -0700698}
699
/* Acquire one of the APE (Application Processing Engine) hardware
 * locks.  Returns 0 on success (or when the APE is absent), -EBUSY if
 * the grant did not arrive within ~1ms, -EINVAL for an unknown lock.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to take. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 requests with the DRIVER bit, other PCI
		 * functions with their own per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the older request/grant register layout. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}
758
/* Release an APE hardware lock previously taken with tg3_ape_lock().
 * Writing the owning bit to the grant register clears the grant (the
 * same write tg3_ape_lock() uses to revoke a timed-out request).
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 owns the DRIVER bit, other PCI functions
		 * own their per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
/* Grab the APE MEM lock and wait up to @timeout_us for the APE to have
 * no event pending.  On success returns 0 with the MEM lock still
 * HELD — the caller must release it.  Returns -EBUSY if the lock could
 * not be taken or the pending bit never cleared (lock not held then).
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Still pending: release the lock while we wait, retry. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
815
Matt Carlsoncf8d55a2012-07-16 16:24:01 +0000816static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817{
818 u32 i, apedata;
819
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824 break;
825
826 udelay(10);
827 }
828
829 return i == timeout_us / 10;
830}
831
/* Read @len bytes from APE scratchpad offset @base_off into @data
 * (u32-aligned buffer) using the driver<->APE event protocol: for each
 * chunk, post a SCRTCHPD_READ request describing (offset, length),
 * wait for the APE to copy the chunk into the shared message buffer,
 * then copy it out one u32 at a time.
 *
 * Returns 0 on success (also when APE_HAS_NCSI is not set — nothing to
 * do), -ENODEV for a bad APE segment signature, -EAGAIN if the
 * firmware is not ready or times out, or an event-lock error.
 *
 * NOTE(review): the copy-out loop steps 4 bytes at a time, so each
 * chunk length is assumed to be a multiple of 4 — presumably
 * guaranteed by the scratchpad buffer geometry and callers; confirm.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer inside APE shared memory. */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* Request descriptor: source offset and byte count. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Drop the MEM lock held by tg3_ape_event_lock() and
		 * ring the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
/* Post event @event to the APE firmware.  Verifies the APE segment
 * signature and firmware-ready status, waits for any previous event to
 * be consumed, then latches the new event and rings APE_EVENT_1.
 * Returns 0 on success, -EAGAIN or an event-lock error otherwise.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* tg3_ape_event_lock() returned with the MEM lock held. */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
922
/* Inform the APE firmware of a driver state transition.  @kind is one
 * of RESET_KIND_INIT / RESET_KIND_SHUTDOWN / RESET_KIND_SUSPEND; any
 * other value is ignored.  No-op without an APE.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment, init count, driver id and
		 * behavior, then mark the driver state as started.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
/* Mask all chip interrupts: set the PCI-INT mask bit in
 * MISC_HOST_CTRL, then write 1 to every vector's interrupt mailbox
 * (flushed, via tw32_mailbox_f).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
989
/* Unmask chip interrupts and re-arm every vector's mailbox with its
 * last seen status tag, then force an initial interrupt (or a
 * coalescing pass) so work that arrived while interrupts were masked
 * gets processed.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();		/* order the irq_sync clear before the MMIO below */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1SHOT_MSI chips get the tag written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
Matt Carlson17375d22009-08-28 14:02:18 +00001021static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
Michael Chan04237dd2005-04-25 15:17:17 -07001022{
Matt Carlson17375d22009-08-28 14:02:18 +00001023 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00001024 struct tg3_hw_status *sblk = tnapi->hw_status;
Michael Chan04237dd2005-04-25 15:17:17 -07001025 unsigned int work_exists = 0;
1026
1027 /* check for phy events */
Joe Perches63c3a662011-04-26 08:12:10 +00001028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
Michael Chan04237dd2005-04-25 15:17:17 -07001029 if (sblk->status & SD_STATUS_LINK_CHG)
1030 work_exists = 1;
1031 }
Matt Carlsonf891ea12012-04-24 13:37:01 +00001032
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035 work_exists = 1;
1036
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
Matt Carlson8d9d7cf2009-09-01 13:19:05 +00001039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
Michael Chan04237dd2005-04-25 15:17:17 -07001040 work_exists = 1;
1041
1042 return work_exists;
1043}
1044
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack work up to last_tag; this also re-enables the vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* order the mailbox write against other MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Reprogram TG3PCI_CLOCK_CTRL, keeping only the CLKRUN bits and the
 * low 5-bit field.  Skipped on CPMU-equipped and 5780-class parts.
 * The 44MHz-core transition is sequenced through the ALTCLK bit in two
 * flushed+waited writes before the final value is written.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;	/* cached for later restores */

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
/* Max number of 10us MI_COM busy polls in __tg3_readphy/__tg3_writephy. */
#define PHY_BUSY_LOOPS	5000
1100
/* Read the 16-bit PHY register @reg of the PHY at MII address
 * @phy_addr via the MAC's MI (MDIO) interface, storing the result in
 * *@val.  Returns 0 on success, -EBUSY if MI_COM stayed busy for all
 * PHY_BUSY_LOOPS polls.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Temporarily disable MI autopolling while we drive MI_COM by
	 * hand; it is restored before returning.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Compose the MI frame: PHY address, register, READ, START. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for BUSY to clear; re-read once more after it does. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore MI autopolling if it was enabled. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read register @reg of the chip's primary PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write the 16-bit value @val to PHY register @reg of the PHY at MII
 * address @phy_addr via the MAC's MI (MDIO) interface.  Returns 0 on
 * success, -EBUSY if MI_COM stayed busy for all PHY_BUSY_LOOPS polls.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Writes to CTRL1000 / AUX_CTRL are skipped on FET PHYs. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Temporarily disable MI autopolling while we drive MI_COM by
	 * hand; it is restored before returning.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Compose the MI frame: PHY address, register, data, WRITE, START. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for BUSY to clear. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore MI autopolling if it was enabled. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write @val to register @reg of the chip's primary PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
Matt Carlsonb0988c12011-04-20 07:57:39 +00001219static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220{
1221 int err;
1222
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 if (err)
1225 goto done;
1226
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 if (err)
1229 goto done;
1230
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 if (err)
1234 goto done;
1235
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238done:
1239 return err;
1240}
1241
1242static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243{
1244 int err;
1245
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 if (err)
1248 goto done;
1249
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 if (err)
1252 goto done;
1253
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 if (err)
1257 goto done;
1258
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261done:
1262 return err;
1263}
1264
1265static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266{
1267 int err;
1268
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 if (!err)
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273 return err;
1274}
1275
1276static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277{
1278 int err;
1279
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 if (!err)
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284 return err;
1285}
1286
Matt Carlson15ee95c2011-04-20 07:57:40 +00001287static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288{
1289 int err;
1290
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 if (!err)
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297 return err;
1298}
1299
Matt Carlsonb4bd2922011-04-20 07:57:41 +00001300static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301{
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306}
1307
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00001308static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309{
1310 u32 val;
1311 int err;
Matt Carlson1d36ba42011-04-20 07:57:42 +00001312
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00001313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315 if (err)
1316 return err;
1317 if (enable)
1318
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 else
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326 return err;
1327}
Matt Carlson1d36ba42011-04-20 07:57:42 +00001328
Matt Carlson95e28692008-05-25 23:44:14 -07001329static int tg3_bmcr_reset(struct tg3 *tp)
1330{
1331 u32 phy_control;
1332 int limit, err;
1333
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1336 */
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1339 if (err != 0)
1340 return -EBUSY;
1341
1342 limit = 5000;
1343 while (limit--) {
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 if (err != 0)
1346 return -EBUSY;
1347
1348 if ((phy_control & BMCR_RESET) == 0) {
1349 udelay(40);
1350 break;
1351 }
1352 udelay(10);
1353 }
Roel Kluind4675b52009-02-12 16:33:27 -08001354 if (limit < 0)
Matt Carlson95e28692008-05-25 23:44:14 -07001355 return -EBUSY;
1356
1357 return 0;
1358}
1359
Matt Carlson158d7ab2008-05-29 01:37:54 -07001360static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361{
Francois Romieu3d165432009-01-19 16:56:50 -08001362 struct tg3 *tp = bp->priv;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001363 u32 val;
1364
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001365 spin_lock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001366
1367 if (tg3_readphy(tp, reg, &val))
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001368 val = -EIO;
1369
1370 spin_unlock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001371
1372 return val;
1373}
1374
1375static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376{
Francois Romieu3d165432009-01-19 16:56:50 -08001377 struct tg3 *tp = bp->priv;
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001378 u32 ret = 0;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001379
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001380 spin_lock_bh(&tp->lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001381
1382 if (tg3_writephy(tp, reg, val))
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001383 ret = -EIO;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001384
Matt Carlson24bb4fb2009-10-05 17:55:29 +00001385 spin_unlock_bh(&tp->lock);
1386
1387 return ret;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001388}
1389
/* mii_bus reset hook: no bus-level reset is performed; always succeeds. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Program the MAC's PHY-configuration registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) on 5785 devices according to the attached PHY
 * model, the PHY interface mode, and the RGMII in-band/out-of-band
 * status flags.  Unknown PHY models are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED-mode bits matching the attached PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set LED modes plus RX/TX clock timeouts
		 * and return; the RGMII machinery below is skipped.
		 */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled gets the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Finally the external RGMII mode bits, gated the same way. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Disable MI-interface hardware auto-polling so the driver performs all
 * MDIO accesses itself, then reapply the 5785-specific MAC/PHY glue
 * configuration if the mdio bus has already been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);	/* presumably lets the MI mode change settle -- TODO confirm in chip docs */

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY address, then (for phylib-driven configs) allocate and
 * register the mdio bus and apply per-PHY-model quirks.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation failure,
 * -ENODEV when no usable PHY is found, or the mdiobus_register() error).
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* On 5717+ the PHY address is derived from the PCI
		 * function number.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		/* Serdes PHYs live at an offset of 7 from the copper ones. */
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do unless phylib is in use and the bus has not
	 * already been brought up.
	 */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only probe the one address our PHY answers at. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY-model interface and workaround flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592static void tg3_mdio_fini(struct tg3 *tp)
1593{
Joe Perches63c3a662011-04-26 08:12:10 +00001594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001598 }
1599}
1600
Matt Carlson95e28692008-05-25 23:44:14 -07001601/* tp->lock is held. */
Matt Carlson4ba526c2008-08-15 14:10:04 -07001602static inline void tg3_generate_fw_event(struct tg3 *tp)
1603{
1604 u32 val;
1605
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610 tp->last_event_jiffies = jiffies;
1611}
1612
1613#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
/* tp->lock is held.
 *
 * Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to clear
 * the driver-event bit set by tg3_generate_fw_event(), i.e. to ACK the
 * previously posted event.  Uses last_event_jiffies to skip or shorten
 * the wait when enough wall time has already elapsed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8us steps; +1 guarantees at least one iteration. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware clears the bit once it has consumed the event. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1641
/* tp->lock is held.
 *
 * Snapshot four pairs of MII registers into data[0..3], packing each pair
 * as (first_reg << 16) | second_reg.  A failed tg3_readphy() leaves that
 * half of the word zero.  Layout matches what the UMP firmware expects
 * in the FW command data mailbox (see tg3_ump_link_report()).
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	/* data[0]: BMCR / BMSR */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* data[1]: ADVERTISE / LPA */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* data[2]: CTRL1000 / STAT1000 -- zero for MII serdes, which has
	 * no 1000BASE-T registers.
	 */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	/* data[3]: PHYADDR in the high half, low half always zero. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1676
1677/* tp->lock is held. */
1678static void tg3_ump_link_report(struct tg3 *tp)
1679{
1680 u32 data[4];
1681
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683 return;
1684
1685 tg3_phy_gather_ump_data(tp, data);
1686
1687 tg3_wait_for_event_ack(tp);
1688
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
Matt Carlson95e28692008-05-25 23:44:14 -07001695
Matt Carlson4ba526c2008-08-15 14:10:04 -07001696 tg3_generate_fw_event(tp);
Matt Carlson95e28692008-05-25 23:44:14 -07001697}
1698
/* tp->lock is held.
 *
 * Tell the ASF firmware to pause.  Skipped when the APE is managing the
 * device instead of legacy ASF.  Both the preceding and following event
 * ACK waits are required for the mailbox handshake to be reliable.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1714
Matt Carlsonfd6d3f02011-08-31 11:44:52 +00001715/* tp->lock is held. */
1716static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717{
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722 switch (kind) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725 DRV_STATE_START);
1726 break;
1727
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 DRV_STATE_UNLOAD);
1731 break;
1732
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735 DRV_STATE_SUSPEND);
1736 break;
1737
1738 default:
1739 break;
1740 }
1741 }
1742
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1746}
1747
1748/* tp->lock is held. */
1749static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750{
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752 switch (kind) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1756 break;
1757
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1761 break;
1762
1763 default:
1764 break;
1765 }
1766 }
1767
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1770}
1771
1772/* tp->lock is held. */
1773static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774{
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1776 switch (kind) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779 DRV_STATE_START);
1780 break;
1781
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784 DRV_STATE_UNLOAD);
1785 break;
1786
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789 DRV_STATE_SUSPEND);
1790 break;
1791
1792 default:
1793 break;
1794 }
1795 }
1796}
1797
/* Wait for the on-chip firmware to finish its boot/initialization.
 *
 * Returns 0 on success (or when no firmware applies), -ENODEV when the
 * 5906 VCPU never signals init-done.  On other chips a timeout is NOT an
 * error -- some boards legitimately carry no firmware.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: firmware writes
	 * the complement of the boot magic into the mailbox when done.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1846
Matt Carlson95e28692008-05-25 23:44:14 -07001847static void tg3_link_report(struct tg3 *tp)
1848{
1849 if (!netif_carrier_ok(tp->dev)) {
Joe Perches05dbe002010-02-17 19:44:19 +00001850 netif_info(tp, link, tp->dev, "Link is down\n");
Matt Carlson95e28692008-05-25 23:44:14 -07001851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
Joe Perches05dbe002010-02-17 19:44:19 +00001853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1855 1000 :
1856 (tp->link_config.active_speed == SPEED_100 ?
1857 100 : 10)),
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1859 "full" : "half"));
Matt Carlson95e28692008-05-25 23:44:14 -07001860
Joe Perches05dbe002010-02-17 19:44:19 +00001861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863 "on" : "off",
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865 "on" : "off");
Matt Carlson47007832011-04-20 07:57:43 +00001866
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1870
Matt Carlson95e28692008-05-25 23:44:14 -07001871 tg3_ump_link_report(tp);
1872 }
1873}
1874
Matt Carlson95e28692008-05-25 23:44:14 -07001875static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1876{
1877 u16 miireg;
1878
Steve Glendinninge18ce342008-12-16 02:00:00 -08001879 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
Matt Carlson95e28692008-05-25 23:44:14 -07001880 miireg = ADVERTISE_1000XPAUSE;
Steve Glendinninge18ce342008-12-16 02:00:00 -08001881 else if (flow_ctrl & FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001882 miireg = ADVERTISE_1000XPSE_ASYM;
Steve Glendinninge18ce342008-12-16 02:00:00 -08001883 else if (flow_ctrl & FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001884 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1885 else
1886 miireg = 0;
1887
1888 return miireg;
1889}
1890
Matt Carlson95e28692008-05-25 23:44:14 -07001891static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1892{
1893 u8 cap = 0;
1894
Matt Carlsonf3791cd2011-11-21 15:01:17 +00001895 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1896 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1897 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1898 if (lcladv & ADVERTISE_1000XPAUSE)
1899 cap = FLOW_CTRL_RX;
1900 if (rmtadv & ADVERTISE_1000XPAUSE)
Steve Glendinninge18ce342008-12-16 02:00:00 -08001901 cap = FLOW_CTRL_TX;
Matt Carlson95e28692008-05-25 23:44:14 -07001902 }
1903
1904 return cap;
1905}
1906
/* Program the MAC's RX/TX flow-control enables from either the autoneg
 * result (lcladv/rmtadv advertisement words) or the statically configured
 * flow control, and record the active setting in link_config.  Registers
 * are only rewritten when the value actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* Autoneg state lives in phylib when it drives the PHY. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution; copper
		 * uses the standard MII full-duplex resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1945
/* phylib link-change callback: reprogram the MAC (port mode, duplex,
 * flow control, MI status, TX slot timing) to match the PHY's new state,
 * then emit a link report if anything user-visible changed.  Runs under
 * tp->lock (bh-disabled); tg3_link_report() is called after unlock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			/* 5785 at unknown speed falls back to MII. */
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Pause is only meaningful at full duplex. */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch MAC_MODE (and pay the settle delay) on change. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
2029
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach with tg3_adjust_link as the link-change callback, and
 * clamp the PHY's advertised feature set to what the MAC supports.
 *
 * Returns 0 on success (or if already connected), PTR_ERR() from
 * phy_connect() on attach failure, or -EINVAL for an unsupported
 * PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: treat like plain MII. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2077
/* Start the phylib state machine.  When resuming from low power, first
 * restore the saved speed/duplex/autoneg/advertising settings onto the
 * phy_device so the restarted autonegotiation uses them.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2099
2100static void tg3_phy_stop(struct tg3 *tp)
2101{
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002102 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002103 return;
2104
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00002105 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002106}
2107
2108static void tg3_phy_fini(struct tg3 *tp)
2109{
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002110 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00002111 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002112 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002113 }
2114}
2115
/* Enable external loopback via the PHY's shadow AUXCTL register.
 * FET-style PHYs are skipped (no such control).  Returns 0 on success
 * or the tg3_phy_auxctl_read/write error code.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		/* 0x4c20 is the fixed base value written alongside the
		 * loopback bit -- presumably the 5401's required AUXCTL
		 * defaults; TODO confirm against the PHY datasheet.
		 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2145
/* Enable/disable Auto Power-Down on FET-style PHYs.  The APD bit lives
 * in a shadow register, so the access is bracketed by setting and then
 * restoring MII_TG3_FET_TEST's shadow-enable bit.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		/* Open the shadow register window. */
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Restore the original test-register value. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2165
/* Enable/disable the PHY's Auto Power-Down feature.  Not applicable to
 * pre-5705 chips or to 5717+ MII-serdes configurations; FET PHYs are
 * delegated to tg3_phy_fet_toggle_apd().
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First shadow write: SCR5 power-saving controls. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL auto power-down is withheld only on 5784 while enabling. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second shadow write: APD select with an 84ms wake timer. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2200
/* Enable/disable automatic MDI crossover (auto-MDIX) on the PHY.
 * Not applicable to pre-5705 chips or serdes links.  FET PHYs use a
 * shadow MISCCTRL register; others use the AUXCTL MISC shadow block.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow window, flip the MDIX bit,
			 * then restore the original test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2241
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242static void tg3_phy_set_wirespeed(struct tg3 *tp)
2243{
Matt Carlson15ee95c2011-04-20 07:57:40 +00002244 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 u32 val;
2246
Matt Carlsonf07e9af2010-08-02 11:26:07 +00002247 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 return;
2249
Matt Carlson15ee95c2011-04-20 07:57:40 +00002250 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2251 if (!ret)
Matt Carlsonb4bd2922011-04-20 07:57:41 +00002252 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2253 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254}
2255
Matt Carlsonb2a5c192008-04-03 21:44:44 -07002256static void tg3_phy_apply_otp(struct tg3 *tp)
2257{
2258 u32 otp, phy;
2259
2260 if (!tp->phy_otp)
2261 return;
2262
2263 otp = tp->phy_otp;
2264
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00002265 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
Matt Carlson1d36ba42011-04-20 07:57:42 +00002266 return;
Matt Carlsonb2a5c192008-04-03 21:44:44 -07002267
2268 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2269 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2270 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2271
2272 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2273 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2274 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2275
2276 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2277 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2278 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2279
2280 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2281 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2282
2283 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2284 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2285
2286 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2287 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2288 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2289
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00002290 tg3_phy_toggle_auxctl_smdsp(tp, false);
Matt Carlsonb2a5c192008-04-03 21:44:44 -07002291}
2292
/* Adjust Energy Efficient Ethernet low-power-idle (LPI) state after a
 * link change.  tp->setlpicnt is armed (set to 2) only when the link
 * came up via autoneg at 100/1000 full duplex AND the Clause 45 EEE
 * resolution status shows the link partner agreed to EEE; it is a
 * countdown consumed outside this function — presumably the periodic
 * timer path, NOTE(review): confirm.  When LPI is not armed, it is
 * disabled in the CPMU and, if the link is up, DSP TAP26 is cleared.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer: 16.5us at gigabit, 36us at 100Mb */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		/* Arm LPI only if the partner resolved EEE at this speed */
		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* Clear DSP TAP26 through the SMDSP window (best effort;
		 * skipped when the window cannot be opened or link is down)
		 */
		if (current_link_up == 1 &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2335
/* Enable EEE low-power idle in the CPMU.  On 5717/5719/57765-class
 * parts running at gigabit, first program the DSP TAP26 work-around
 * bits (ALNOKO | RMRXSTO) through the AUXCTL/SMDSP window; the DSP
 * write is skipped if that window cannot be opened.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2354
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355static int tg3_wait_macro_done(struct tg3 *tp)
2356{
2357 int limit = 100;
2358
2359 while (limit--) {
2360 u32 tmp32;
2361
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002362 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 if ((tmp32 & 0x1000) == 0)
2364 break;
2365 }
2366 }
Roel Kluind4675b52009-02-12 16:33:27 -08002367 if (limit < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 return -EBUSY;
2369
2370 return 0;
2371}
2372
/* Vendor work-around helper for tg3_phy_reset_5703_4_5(): write a
 * known test pattern into the six low words of each of the four DSP
 * channels, then read it back through the macro engine and compare.
 *
 * On a macro-engine timeout *resetp is set to 1 so the caller performs
 * a fresh BMCR reset before retrying.  Returns 0 when every channel
 * verifies, -EBUSY on timeout or miscompare.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's register block, arm the write macro */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the write macro and wait for it to finish */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Each read-back pair yields a 15-bit low word and a
		 * 4-bit high word; compare against the pattern.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Miscompare: magic DSP pokes from the
				 * vendor work-around, then bail out.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2438
2439static int tg3_phy_reset_chanpat(struct tg3 *tp)
2440{
2441 int chan;
2442
2443 for (chan = 0; chan < 4; chan++) {
2444 int i;
2445
2446 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2447 (chan * 0x2000) | 0x0200);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002448 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 for (i = 0; i < 6; i++)
2450 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002451 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 if (tg3_wait_macro_done(tp))
2453 return -EBUSY;
2454 }
2455
2456 return 0;
2457}
2458
2459static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2460{
2461 u32 reg32, phy9_orig;
2462 int retries, do_phy_reset, err;
2463
2464 retries = 10;
2465 do_phy_reset = 1;
2466 do {
2467 if (do_phy_reset) {
2468 err = tg3_bmcr_reset(tp);
2469 if (err)
2470 return err;
2471 do_phy_reset = 0;
2472 }
2473
2474 /* Disable transmitter and interrupt. */
2475 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2476 continue;
2477
2478 reg32 |= 0x3000;
2479 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2480
2481 /* Set full-duplex, 1000 mbps. */
2482 tg3_writephy(tp, MII_BMCR,
Matt Carlson221c5632011-06-13 13:39:01 +00002483 BMCR_FULLDPLX | BMCR_SPEED1000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
2485 /* Set to master mode. */
Matt Carlson221c5632011-06-13 13:39:01 +00002486 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 continue;
2488
Matt Carlson221c5632011-06-13 13:39:01 +00002489 tg3_writephy(tp, MII_CTRL1000,
2490 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00002492 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
Matt Carlson1d36ba42011-04-20 07:57:42 +00002493 if (err)
2494 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
2496 /* Block the PHY control access. */
Matt Carlson6ee7c0a2010-08-02 11:26:04 +00002497 tg3_phydsp_write(tp, 0x8005, 0x0800);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
2499 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2500 if (!err)
2501 break;
2502 } while (--retries);
2503
2504 err = tg3_phy_reset_chanpat(tp);
2505 if (err)
2506 return err;
2507
Matt Carlson6ee7c0a2010-08-02 11:26:04 +00002508 tg3_phydsp_write(tp, 0x8005, 0x0000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
2510 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00002511 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00002513 tg3_phy_toggle_auxctl_smdsp(tp, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
Matt Carlson221c5632011-06-13 13:39:01 +00002515 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2518 reg32 &= ~0x3000;
2519 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2520 } else if (!err)
2521 err = -EBUSY;
2522
2523 return err;
2524}
2525
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00002526static void tg3_carrier_on(struct tg3 *tp)
2527{
2528 netif_carrier_on(tp->dev);
2529 tp->link_up = true;
2530}
2531
2532static void tg3_carrier_off(struct tg3 *tp)
2533{
2534 netif_carrier_off(tp->dev);
2535 tp->link_up = false;
2536}
2537
/* Fully reset the tigon3 PHY and reapply all chip-specific
 * work-arounds and feature settings.  Returns 0 on success or a
 * negative errno.  (An older comment here referred to a FORCE
 * argument; no such parameter exists.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: pull the ethernet PHY out of IDDQ (power-down) first */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads — latched status needs a double read; if the
	 * PHY is unreachable, fail early.
	 */
	err = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops the link; tell the stack now */
	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	/* 5703/4/5 need the DSP test-pattern reset work-around */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (post-AX) temporarily clear the 10MB-RX-only CPMU
	 * mode around the BMCR reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: back the MAC clock off the 12.5MHz setting */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* Serdes-based 5717+ devices skip all copper-PHY fixups */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Erratum fixups keyed off phy_flags; the raw DSP addresses and
	 * values are vendor magic for the respective bugs.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice on purpose — part of the 5704 A0 fixup */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2681
/* Inter-function GPIO power-source handshake messages.  Each of up to
 * four PCI functions owns one 4-bit nibble (shifted by 4 * pci_fn) in
 * the shared status word managed by tg3_set_function_status() below;
 * the ALL_* masks test a given message bit across every function.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2697
2698static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2699{
2700 u32 status, shift;
2701
Joe Perches41535772013-02-16 11:20:04 +00002702 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2703 tg3_asic_rev(tp) == ASIC_REV_5719)
Matt Carlson3a1e19d2011-07-13 09:27:32 +00002704 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2705 else
2706 status = tr32(TG3_CPMU_DRV_STATUS);
2707
2708 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2709 status &= ~(TG3_GPIO_MSG_MASK << shift);
2710 status |= (newstat << shift);
2711
Joe Perches41535772013-02-16 11:20:04 +00002712 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2713 tg3_asic_rev(tp) == ASIC_REV_5719)
Matt Carlson3a1e19d2011-07-13 09:27:32 +00002714 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2715 else
2716 tw32(TG3_CPMU_DRV_STATUS, status);
2717
2718 return status >> TG3_APE_GPIO_MSG_SHIFT;
2719}
2720
Matt Carlson520b2752011-06-13 13:39:02 +00002721static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2722{
2723 if (!tg3_flag(tp, IS_NIC))
2724 return 0;
2725
Joe Perches41535772013-02-16 11:20:04 +00002726 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2727 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2728 tg3_asic_rev(tp) == ASIC_REV_5720) {
Matt Carlson3a1e19d2011-07-13 09:27:32 +00002729 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2730 return -EIO;
Matt Carlson520b2752011-06-13 13:39:02 +00002731
Matt Carlson3a1e19d2011-07-13 09:27:32 +00002732 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2733
2734 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2735 TG3_GRC_LCLCTL_PWRSW_DELAY);
2736
2737 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2738 } else {
2739 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2740 TG3_GRC_LCLCTL_PWRSW_DELAY);
2741 }
Matt Carlson6f5c8f832011-07-13 09:27:31 +00002742
Matt Carlson520b2752011-06-13 13:39:02 +00002743 return 0;
2744}
2745
2746static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2747{
2748 u32 grc_local_ctrl;
2749
2750 if (!tg3_flag(tp, IS_NIC) ||
Joe Perches41535772013-02-16 11:20:04 +00002751 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2752 tg3_asic_rev(tp) == ASIC_REV_5701)
Matt Carlson520b2752011-06-13 13:39:02 +00002753 return;
2754
2755 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2756
2757 tw32_wait_f(GRC_LOCAL_CTRL,
2758 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2760
2761 tw32_wait_f(GRC_LOCAL_CTRL,
2762 grc_local_ctrl,
2763 TG3_GRC_LCLCTL_PWRSW_DELAY);
2764
2765 tw32_wait_f(GRC_LOCAL_CTRL,
2766 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2767 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768}
2769
/* Switch the board's power source to auxiliary power (Vaux) by
 * sequencing the GRC local-control GPIOs.  The sequence is board
 * specific: 5700/5701 drive GPIO 0/1 directly, the non-"e" 5761
 * swaps GPIO 0 and 2, and everything else uses the generic three-step
 * sequence with a 5714 inrush work-around and a 5753 "no GPIO2" quirk.
 * Each step waits TG3_GRC_LCLCTL_PWRSW_DELAY for the switch to settle.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
Matt Carlson6f5c8f832011-07-13 09:27:31 +00002846
Matt Carlsoncd0d7222011-07-13 09:27:33 +00002847static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
Matt Carlson3a1e19d2011-07-13 09:27:32 +00002848{
2849 u32 msg = 0;
2850
2851 /* Serialize power state transitions */
2852 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2853 return;
2854
Matt Carlsoncd0d7222011-07-13 09:27:33 +00002855 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
Matt Carlson3a1e19d2011-07-13 09:27:32 +00002856 msg = TG3_GPIO_MSG_NEED_VAUX;
2857
2858 msg = tg3_set_function_status(tp, msg);
2859
2860 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2861 goto done;
2862
2863 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2864 tg3_pwrsrc_switch_to_vaux(tp);
2865 else
2866 tg3_pwrsrc_die_with_vmain(tp);
2867
2868done:
Matt Carlson6f5c8f832011-07-13 09:27:31 +00002869 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
Matt Carlson520b2752011-06-13 13:39:02 +00002870}
2871
/* Decide whether the board should run from auxiliary power (Vaux) or
 * main power.  Vaux is needed when this device — or its peer function
 * on two-port boards — has ASF management or (when include_wol) WOL
 * enabled.  5717/5719/5720 delegate to the multi-function handshake
 * in tg3_frob_aux_power_5717().
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer owns the decision */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2915
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002916static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2917{
2918 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2919 return 1;
Matt Carlson79eb6902010-02-17 15:17:03 +00002920 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002921 if (speed != SPEED_10)
2922 return 1;
2923 } else if (speed == SPEED_10)
2924 return 1;
2925
2926 return 0;
2927}
2928
/* Power down the PHY as far as this chip safely allows.  The path
 * taken depends on the PHY/board type: serdes parts only tweak the
 * SG_DIG/SERDES config (5704), 5906 puts its embedded PHY into IDDQ,
 * FET-style PHYs use the shadow AUXMODE4 power-down bit, and copper
 * PHYs optionally enter the low-power AUXCTL state before the final
 * BMCR power-down — which is skipped entirely on chips where powering
 * the PHY down is known to be buggy.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* bit 15 of SERDES_CFG — vendor-specified; exact
			 * meaning not visible here
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Reset the PHY, then drop it into IDDQ power-down */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set the standby-power-down bit via the shadow
			 * register window, then close the window.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	/* 5784-AX/5761-AX: force the 12.5MHz MAC clock before power-down */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3003
Matt Carlson3f007892008-11-03 16:51:36 -08003004/* tp->lock is held. */
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003005static int tg3_nvram_lock(struct tg3 *tp)
3006{
Joe Perches63c3a662011-04-26 08:12:10 +00003007 if (tg3_flag(tp, NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003008 int i;
3009
3010 if (tp->nvram_lock_cnt == 0) {
3011 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3012 for (i = 0; i < 8000; i++) {
3013 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3014 break;
3015 udelay(20);
3016 }
3017 if (i == 8000) {
3018 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3019 return -ENODEV;
3020 }
3021 }
3022 tp->nvram_lock_cnt++;
3023 }
3024 return 0;
3025}
3026
3027/* tp->lock is held. */
3028static void tg3_nvram_unlock(struct tg3 *tp)
3029{
Joe Perches63c3a662011-04-26 08:12:10 +00003030 if (tg3_flag(tp, NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003031 if (tp->nvram_lock_cnt > 0)
3032 tp->nvram_lock_cnt--;
3033 if (tp->nvram_lock_cnt == 0)
3034 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3035 }
3036}
3037
3038/* tp->lock is held. */
3039static void tg3_enable_nvram_access(struct tg3 *tp)
3040{
Joe Perches63c3a662011-04-26 08:12:10 +00003041 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003042 u32 nvaccess = tr32(NVRAM_ACCESS);
3043
3044 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3045 }
3046}
3047
3048/* tp->lock is held. */
3049static void tg3_disable_nvram_access(struct tg3 *tp)
3050{
Joe Perches63c3a662011-04-26 08:12:10 +00003051 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003052 u32 nvaccess = tr32(NVRAM_ACCESS);
3053
3054 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3055 }
3056}
3057
/* Read one dword from the legacy serial EEPROM via the GRC EEPROM
 * engine.  Used on devices that lack the NVRAM interface.
 *
 * @offset: byte offset into the EEPROM; must be dword aligned and fit
 *          in the EEPROM address field.
 * @val:    receives the byteswapped dword on success.
 *
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Program the target address and kick off a read transaction. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3097
3098#define NVRAM_CMD_TIMEOUT 10000
3099
3100static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3101{
3102 int i;
3103
3104 tw32(NVRAM_CMD, nvram_cmd);
3105 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3106 udelay(10);
3107 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3108 udelay(10);
3109 break;
3110 }
3111 }
3112
3113 if (i == NVRAM_CMD_TIMEOUT)
3114 return -EBUSY;
3115
3116 return 0;
3117}
3118
3119static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3120{
Joe Perches63c3a662011-04-26 08:12:10 +00003121 if (tg3_flag(tp, NVRAM) &&
3122 tg3_flag(tp, NVRAM_BUFFERED) &&
3123 tg3_flag(tp, FLASH) &&
3124 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003125 (tp->nvram_jedecnum == JEDEC_ATMEL))
3126
3127 addr = ((addr / tp->nvram_pagesize) <<
3128 ATMEL_AT45DB0X1B_PAGE_POS) +
3129 (addr % tp->nvram_pagesize);
3130
3131 return addr;
3132}
3133
3134static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3135{
Joe Perches63c3a662011-04-26 08:12:10 +00003136 if (tg3_flag(tp, NVRAM) &&
3137 tg3_flag(tp, NVRAM_BUFFERED) &&
3138 tg3_flag(tp, FLASH) &&
3139 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003140 (tp->nvram_jedecnum == JEDEC_ATMEL))
3141
3142 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3143 tp->nvram_pagesize) +
3144 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3145
3146 return addr;
3147}
3148
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Devices without the NVRAM interface use the legacy serial
	 * EEPROM path instead.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Serialize against firmware and other users of the NVRAM engine. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3186
Matt Carlsona9dc5292009-02-25 14:25:30 +00003187/* Ensures NVRAM data is in bytestream format. */
3188static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003189{
3190 u32 v;
Matt Carlsona9dc5292009-02-25 14:25:30 +00003191 int res = tg3_nvram_read(tp, offset, &v);
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003192 if (!res)
Matt Carlsona9dc5292009-02-25 14:25:30 +00003193 *val = cpu_to_be32(v);
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003194 return res;
3195}
3196
/* Write @len bytes at @offset to the legacy serial EEPROM one dword at
 * a time.  @offset and @len must be dword aligned.
 *
 * Returns 0 on success, -EBUSY if a dword write fails to complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale completion status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per dword. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3245
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
					    u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one flash page for read-modify-write. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read back the whole page so bytes outside the caller's
		 * range survive the erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* NOTE(review): buf is never advanced between loop
		 * iterations, so a write spanning more than one full page
		 * would repeat the first chunk of caller data on every
		 * page — confirm callers never hit this, or advance buf
		 * by size here.
		 */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back one dword at a time. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Leave the part write-disabled regardless of the outcome. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3344
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
					  u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Atmel buffered parts need page/offset address translation. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark page boundaries and the final dword of the block. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash only needs the address programmed at the
		 * start of a burst; everything else gets it every dword.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* ST parts require an explicit write-enable before the
		 * first dword of each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3399
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop the external write-protect GPIO while writing. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		/* NOTE(review): this early return skips re-asserting the
		 * write-protect GPIO restored at the bottom — confirm
		 * whether that is acceptable on lock failure.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes at the GRC level for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore write protection. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3449
Matt Carlson997b4f12011-08-31 11:44:53 +00003450#define RX_CPU_SCRATCH_BASE 0x30000
3451#define RX_CPU_SCRATCH_SIZE 0x04000
3452#define TX_CPU_SCRATCH_BASE 0x34000
3453#define TX_CPU_SCRATCH_SIZE 0x04000
3454
3455/* tp->lock is held. */
Nithin Sujir837c45b2013-03-06 17:02:30 +00003456static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
Matt Carlson997b4f12011-08-31 11:44:53 +00003457{
3458 int i;
Nithin Sujir837c45b2013-03-06 17:02:30 +00003459 const int iters = 10000;
Matt Carlson997b4f12011-08-31 11:44:53 +00003460
Nithin Sujir837c45b2013-03-06 17:02:30 +00003461 for (i = 0; i < iters; i++) {
3462 tw32(cpu_base + CPU_STATE, 0xffffffff);
3463 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3464 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3465 break;
3466 }
3467
3468 return (i == iters) ? -EBUSY : 0;
3469}
3470
3471/* tp->lock is held. */
3472static int tg3_rxcpu_pause(struct tg3 *tp)
3473{
3474 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3475
3476 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3477 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3478 udelay(10);
3479
3480 return rc;
3481}
3482
3483/* tp->lock is held. */
3484static int tg3_txcpu_pause(struct tg3 *tp)
3485{
3486 return tg3_pause_cpu(tp, TX_CPU_BASE);
3487}
3488
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	/* Clear all pending state bits, then drop the HALT bit so the
	 * embedded CPU resumes execution.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
}
3495
/* tp->lock is held. */
/* Convenience wrapper: resume the RX embedded CPU. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3501
/* tp->lock is held. */
/* Halt the RX or TX embedded CPU at @cpu_base.
 *
 * Returns 0 on success, -ENODEV if the CPU fails to halt.  5705+ parts
 * have no TX CPU, hence the BUG_ON guard.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	/* 5906 uses a dedicated VCPU halt control instead of CPU_MODE. */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3539
Nithin Sujir31f11a92013-03-06 17:02:33 +00003540static int tg3_fw_data_len(struct tg3 *tp,
3541 const struct tg3_firmware_hdr *fw_hdr)
3542{
3543 int fw_len;
3544
3545 /* Non fragmented firmware have one firmware header followed by a
3546 * contiguous chunk of data to be written. The length field in that
3547 * header is not the length of data to be written but the complete
3548 * length of the bss. The data length is determined based on
3549 * tp->fw->size minus headers.
3550 *
3551 * Fragmented firmware have a main header followed by multiple
3552 * fragments. Each fragment is identical to non fragmented firmware
3553 * with a firmware header followed by a contiguous chunk of data. In
3554 * the main header, the length field is unused and set to 0xffffffff.
3555 * In each fragment header the length is the entire size of that
3556 * fragment i.e. fragment data + header length. Data length is
3557 * therefore length field in the header minus TG3_FW_HDR_LEN.
3558 */
3559 if (tp->fw_len == 0xffffffff)
3560 fw_len = be32_to_cpu(fw_hdr->len);
3561 else
3562 fw_len = tp->fw->size;
3563
3564 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3565}
3566
/* tp->lock is held. */
/* Halt the target CPU, clear its scratch memory, and copy the firmware
 * image (monolithic or fragmented) into the CPU scratch area.
 *
 * @cpu_base:         RX_CPU_BASE or TX_CPU_BASE.
 * @cpu_scratch_base: start of the CPU's scratch window.
 * @cpu_scratch_size: scratch bytes to zero before loading.
 * @fw_hdr:           first firmware header within tp->fw->data.
 *
 * Returns 0 on success or a negative errno.  The 57766 path skips the
 * halt/clear phase and walks firmware fragments instead.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for this ASIC. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area, then hold the CPU halted while
		 * the image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's payload to its base address in scratch. */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3632
3633/* tp->lock is held. */
Nithin Sujirf4bffb22013-03-06 17:02:31 +00003634static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3635{
3636 int i;
3637 const int iters = 5;
3638
3639 tw32(cpu_base + CPU_STATE, 0xffffffff);
3640 tw32_f(cpu_base + CPU_PC, pc);
3641
3642 for (i = 0; i < iters; i++) {
3643 if (tr32(cpu_base + CPU_PC) == pc)
3644 break;
3645 tw32(cpu_base + CPU_STATE, 0xffffffff);
3646 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3647 tw32_f(cpu_base + CPU_PC, pc);
3648 udelay(1000);
3649 }
3650
3651 return (i == iters) ? -EBUSY : 0;
3652}
3653
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then restart only the RX CPU at the image's entry point.
 *
 * Returns 0 on success, -ENODEV if the RX CPU PC cannot be set, or an
 * error from the firmware load.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3695
Nithin Sujirc4dab502013-03-06 17:02:34 +00003696static int tg3_validate_rxcpu_state(struct tg3 *tp)
3697{
3698 const int iters = 1000;
3699 int i;
3700 u32 val;
3701
3702 /* Wait for boot code to complete initialization and enter service
3703 * loop. It is then safe to download service patches
3704 */
3705 for (i = 0; i < iters; i++) {
3706 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3707 break;
3708
3709 udelay(10);
3710 }
3711
3712 if (i == iters) {
3713 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3714 return -EBUSY;
3715 }
3716
3717 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3718 if (val & 0xff) {
3719 netdev_warn(tp->dev,
3720 "Other patches exist. Not downloading EEE patch\n");
3721 return -EEXIST;
3722 }
3723
3724 return 0;
3725}
3726
/* tp->lock is held. */
/* Best-effort download of the 57766 EEE service patch; returns silently
 * if the device has NVRAM, the boot code is not ready, or no firmware
 * blob was loaded.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below.  The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length.  The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware.  i.e. they have the
	 * firmware header and followed by data for that fragment.  The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3767
/* tp->lock is held. */
/* Load the TSO offload firmware into the appropriate embedded CPU and
 * start it at the image's entry point.  A no-op unless the device uses
 * firmware-based TSO (FW_TSO flag).
 *
 * Returns 0 on success, -ENODEV if the CPU PC cannot be set, or an
 * error from the firmware load.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 runs TSO firmware on the RX CPU; later parts use the TX CPU. */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3817
3818
Matt Carlsonffbcfed2009-02-25 14:24:28 +00003819/* tp->lock is held. */
Matt Carlson3f007892008-11-03 16:51:36 -08003820static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3821{
3822 u32 addr_high, addr_low;
3823 int i;
3824
3825 addr_high = ((tp->dev->dev_addr[0] << 8) |
3826 tp->dev->dev_addr[1]);
3827 addr_low = ((tp->dev->dev_addr[2] << 24) |
3828 (tp->dev->dev_addr[3] << 16) |
3829 (tp->dev->dev_addr[4] << 8) |
3830 (tp->dev->dev_addr[5] << 0));
3831 for (i = 0; i < 4; i++) {
3832 if (i == 1 && skip_mac_1)
3833 continue;
3834 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3835 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3836 }
3837
Joe Perches41535772013-02-16 11:20:04 +00003838 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3839 tg3_asic_rev(tp) == ASIC_REV_5704) {
Matt Carlson3f007892008-11-03 16:51:36 -08003840 for (i = 0; i < 12; i++) {
3841 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3842 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3843 }
3844 }
3845
3846 addr_high = (tp->dev->dev_addr[0] +
3847 tp->dev->dev_addr[1] +
3848 tp->dev->dev_addr[2] +
3849 tp->dev->dev_addr[3] +
3850 tp->dev->dev_addr[4] +
3851 tp->dev->dev_addr[5]) &
3852 TX_BACKOFF_SEED_MASK;
3853 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3854}
3855
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003856static void tg3_enable_register_access(struct tg3 *tp)
3857{
3858 /*
3859 * Make sure register accesses (indirect or otherwise) will function
3860 * correctly.
3861 */
3862 pci_write_config_dword(tp->pdev,
3863 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3864}
3865
3866static int tg3_power_up(struct tg3 *tp)
3867{
Matt Carlsonbed98292011-07-13 09:27:29 +00003868 int err;
3869
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003870 tg3_enable_register_access(tp);
3871
Matt Carlsonbed98292011-07-13 09:27:29 +00003872 err = pci_set_power_state(tp->pdev, PCI_D0);
3873 if (!err) {
3874 /* Switch out of Vaux if it is a NIC */
3875 tg3_pwrsrc_switch_to_vmain(tp);
3876 } else {
3877 netdev_err(tp->dev, "Transition to D0 failed\n");
3878 }
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003879
Matt Carlsonbed98292011-07-13 09:27:29 +00003880 return err;
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003881}
3882
Matt Carlson4b409522012-02-13 10:20:11 +00003883static int tg3_setup_phy(struct tg3 *, int);
3884
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003885static int tg3_power_down_prepare(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886{
3887 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08003888 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003890 tg3_enable_register_access(tp);
Matt Carlson5e7dfd02008-11-21 17:18:16 -08003891
3892 /* Restore the CLKREQ setting. */
Jiang Liu0f49bfb2012-08-20 13:28:20 -06003893 if (tg3_flag(tp, CLKREQ_BUG))
3894 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3895 PCI_EXP_LNKCTL_CLKREQ_EN);
Matt Carlson5e7dfd02008-11-21 17:18:16 -08003896
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3898 tw32(TG3PCI_MISC_HOST_CTRL,
3899 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3900
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00003901 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
Joe Perches63c3a662011-04-26 08:12:10 +00003902 tg3_flag(tp, WOL_ENABLE);
Matt Carlson05ac4cb2008-11-03 16:53:46 -08003903
Joe Perches63c3a662011-04-26 08:12:10 +00003904 if (tg3_flag(tp, USE_PHYLIB)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08003905 do_low_power = false;
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003906 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
Matt Carlson80096062010-08-02 11:26:06 +00003907 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003908 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08003909 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003910
Matt Carlson3f0e3ad2009-11-02 14:24:36 +00003911 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003912
Matt Carlson80096062010-08-02 11:26:06 +00003913 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003914
Matt Carlsonc6700ce2012-02-13 15:20:15 +00003915 tp->link_config.speed = phydev->speed;
3916 tp->link_config.duplex = phydev->duplex;
3917 tp->link_config.autoneg = phydev->autoneg;
3918 tp->link_config.advertising = phydev->advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003919
3920 advertising = ADVERTISED_TP |
3921 ADVERTISED_Pause |
3922 ADVERTISED_Autoneg |
3923 ADVERTISED_10baseT_Half;
3924
Joe Perches63c3a662011-04-26 08:12:10 +00003925 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3926 if (tg3_flag(tp, WOL_SPEED_100MB))
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003927 advertising |=
3928 ADVERTISED_100baseT_Half |
3929 ADVERTISED_100baseT_Full |
3930 ADVERTISED_10baseT_Full;
3931 else
3932 advertising |= ADVERTISED_10baseT_Full;
3933 }
3934
3935 phydev->advertising = advertising;
3936
3937 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08003938
3939 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
Matt Carlson6a443a02010-02-17 15:17:04 +00003940 if (phyid != PHY_ID_BCMAC131) {
3941 phyid &= PHY_BCM_OUI_MASK;
3942 if (phyid == PHY_BCM_OUI_1 ||
3943 phyid == PHY_BCM_OUI_2 ||
3944 phyid == PHY_BCM_OUI_3)
Matt Carlson0a459aa2008-11-03 16:54:15 -08003945 do_low_power = true;
3946 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07003947 }
Matt Carlsondd477002008-05-25 23:45:58 -07003948 } else {
Matt Carlson20232762008-12-21 20:18:56 -08003949 do_low_power = true;
Matt Carlson0a459aa2008-11-03 16:54:15 -08003950
Matt Carlsonc6700ce2012-02-13 15:20:15 +00003951 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
Matt Carlson80096062010-08-02 11:26:06 +00003952 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953
Matt Carlson2855b9f2012-02-13 15:20:14 +00003954 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
Matt Carlsondd477002008-05-25 23:45:58 -07003955 tg3_setup_phy(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 }
3957
Joe Perches41535772013-02-16 11:20:04 +00003958 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
Michael Chanb5d37722006-09-27 16:06:21 -07003959 u32 val;
3960
3961 val = tr32(GRC_VCPU_EXT_CTRL);
3962 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
Joe Perches63c3a662011-04-26 08:12:10 +00003963 } else if (!tg3_flag(tp, ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08003964 int i;
3965 u32 val;
3966
3967 for (i = 0; i < 200; i++) {
3968 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3969 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3970 break;
3971 msleep(1);
3972 }
3973 }
Joe Perches63c3a662011-04-26 08:12:10 +00003974 if (tg3_flag(tp, WOL_CAP))
Gary Zambranoa85feb82007-05-05 11:52:19 -07003975 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3976 WOL_DRV_STATE_SHUTDOWN |
3977 WOL_DRV_WOL |
3978 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08003979
Matt Carlson05ac4cb2008-11-03 16:53:46 -08003980 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 u32 mac_mode;
3982
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003983 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
Matt Carlsonb4bd2922011-04-20 07:57:41 +00003984 if (do_low_power &&
3985 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3986 tg3_phy_auxctl_write(tp,
3987 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3988 MII_TG3_AUXCTL_PCTL_WOL_EN |
3989 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3990 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
Matt Carlsondd477002008-05-25 23:45:58 -07003991 udelay(40);
3992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Matt Carlsonf07e9af2010-08-02 11:26:07 +00003994 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
Michael Chan3f7045c2006-09-27 16:02:29 -07003995 mac_mode = MAC_MODE_PORT_MODE_GMII;
3996 else
3997 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003999 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
Joe Perches41535772013-02-16 11:20:04 +00004000 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
Joe Perches63c3a662011-04-26 08:12:10 +00004001 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07004002 SPEED_100 : SPEED_10;
4003 if (tg3_5700_link_polarity(tp, speed))
4004 mac_mode |= MAC_MODE_LINK_POLARITY;
4005 else
4006 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4007 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 } else {
4009 mac_mode = MAC_MODE_PORT_MODE_TBI;
4010 }
4011
Joe Perches63c3a662011-04-26 08:12:10 +00004012 if (!tg3_flag(tp, 5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013 tw32(MAC_LED_CTRL, tp->led_ctrl);
4014
Matt Carlson05ac4cb2008-11-03 16:53:46 -08004015 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00004016 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4017 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
Matt Carlson05ac4cb2008-11-03 16:53:46 -08004018 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019
Joe Perches63c3a662011-04-26 08:12:10 +00004020 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsond2394e6b2010-11-24 08:31:47 +00004021 mac_mode |= MAC_MODE_APE_TX_EN |
4022 MAC_MODE_APE_RX_EN |
4023 MAC_MODE_TDE_ENABLE;
Matt Carlson3bda1252008-08-15 14:08:22 -07004024
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 tw32_f(MAC_MODE, mac_mode);
4026 udelay(100);
4027
4028 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4029 udelay(10);
4030 }
4031
Joe Perches63c3a662011-04-26 08:12:10 +00004032 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
Joe Perches41535772013-02-16 11:20:04 +00004033 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4034 tg3_asic_rev(tp) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 u32 base_val;
4036
4037 base_val = tp->pci_clock_ctrl;
4038 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4039 CLOCK_CTRL_TXCLK_DISABLE);
4040
Michael Chanb401e9e2005-12-19 16:27:04 -08004041 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4042 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Joe Perches63c3a662011-04-26 08:12:10 +00004043 } else if (tg3_flag(tp, 5780_CLASS) ||
4044 tg3_flag(tp, CPMU_PRESENT) ||
Joe Perches41535772013-02-16 11:20:04 +00004045 tg3_asic_rev(tp) == ASIC_REV_5906) {
Michael Chan4cf78e42005-07-25 12:29:19 -07004046 /* do nothing */
Joe Perches63c3a662011-04-26 08:12:10 +00004047 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048 u32 newbits1, newbits2;
4049
Joe Perches41535772013-02-16 11:20:04 +00004050 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4051 tg3_asic_rev(tp) == ASIC_REV_5701) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4053 CLOCK_CTRL_TXCLK_DISABLE |
4054 CLOCK_CTRL_ALTCLK);
4055 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
Joe Perches63c3a662011-04-26 08:12:10 +00004056 } else if (tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057 newbits1 = CLOCK_CTRL_625_CORE;
4058 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4059 } else {
4060 newbits1 = CLOCK_CTRL_ALTCLK;
4061 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4062 }
4063
Michael Chanb401e9e2005-12-19 16:27:04 -08004064 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4065 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066
Michael Chanb401e9e2005-12-19 16:27:04 -08004067 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4068 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069
Joe Perches63c3a662011-04-26 08:12:10 +00004070 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 u32 newbits3;
4072
Joe Perches41535772013-02-16 11:20:04 +00004073 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4074 tg3_asic_rev(tp) == ASIC_REV_5701) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4076 CLOCK_CTRL_TXCLK_DISABLE |
4077 CLOCK_CTRL_44MHZ_CORE);
4078 } else {
4079 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4080 }
4081
Michael Chanb401e9e2005-12-19 16:27:04 -08004082 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4083 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 }
4085 }
4086
Joe Perches63c3a662011-04-26 08:12:10 +00004087 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
Matt Carlson0a459aa2008-11-03 16:54:15 -08004088 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08004089
Matt Carlsoncd0d7222011-07-13 09:27:33 +00004090 tg3_frob_aux_power(tp, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091
4092 /* Workaround for unstable PLL clock */
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +00004093 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
Joe Perches41535772013-02-16 11:20:04 +00004094 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4095 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 u32 val = tr32(0x7d00);
4097
4098 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4099 tw32(0x7d00, val);
Joe Perches63c3a662011-04-26 08:12:10 +00004100 if (!tg3_flag(tp, ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08004101 int err;
4102
4103 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08004105 if (!err)
4106 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08004107 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108 }
4109
Michael Chanbbadf502006-04-06 21:46:34 -07004110 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4111
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112 return 0;
4113}
4114
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00004115static void tg3_power_down(struct tg3 *tp)
4116{
4117 tg3_power_down_prepare(tp);
4118
Joe Perches63c3a662011-04-26 08:12:10 +00004119 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +00004120 pci_set_power_state(tp->pdev, PCI_D3hot);
4121}
4122
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4124{
4125 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4126 case MII_TG3_AUX_STAT_10HALF:
4127 *speed = SPEED_10;
4128 *duplex = DUPLEX_HALF;
4129 break;
4130
4131 case MII_TG3_AUX_STAT_10FULL:
4132 *speed = SPEED_10;
4133 *duplex = DUPLEX_FULL;
4134 break;
4135
4136 case MII_TG3_AUX_STAT_100HALF:
4137 *speed = SPEED_100;
4138 *duplex = DUPLEX_HALF;
4139 break;
4140
4141 case MII_TG3_AUX_STAT_100FULL:
4142 *speed = SPEED_100;
4143 *duplex = DUPLEX_FULL;
4144 break;
4145
4146 case MII_TG3_AUX_STAT_1000HALF:
4147 *speed = SPEED_1000;
4148 *duplex = DUPLEX_HALF;
4149 break;
4150
4151 case MII_TG3_AUX_STAT_1000FULL:
4152 *speed = SPEED_1000;
4153 *duplex = DUPLEX_FULL;
4154 break;
4155
4156 default:
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004157 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
Michael Chan715116a2006-09-27 16:09:25 -07004158 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4159 SPEED_10;
4160 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4161 DUPLEX_HALF;
4162 break;
4163 }
Matt Carlsone7405222012-02-13 15:20:16 +00004164 *speed = SPEED_UNKNOWN;
4165 *duplex = DUPLEX_UNKNOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168}
4169
Matt Carlson42b64a42011-05-19 12:12:49 +00004170static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171{
Matt Carlson42b64a42011-05-19 12:12:49 +00004172 int err = 0;
4173 u32 val, new_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174
Matt Carlson42b64a42011-05-19 12:12:49 +00004175 new_adv = ADVERTISE_CSMA;
Hiroaki SHIMODA202ff1c2011-11-22 04:05:41 +00004176 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
Matt Carlsonf88788f2011-12-14 11:10:00 +00004177 new_adv |= mii_advertise_flowctrl(flowctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178
Matt Carlson42b64a42011-05-19 12:12:49 +00004179 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4180 if (err)
4181 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182
Matt Carlson4f272092011-12-14 11:09:57 +00004183 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4184 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08004185
Joe Perches41535772013-02-16 11:20:04 +00004186 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4187 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
Matt Carlson4f272092011-12-14 11:09:57 +00004188 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08004189
Matt Carlson4f272092011-12-14 11:09:57 +00004190 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4191 if (err)
4192 goto done;
4193 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08004194
Matt Carlson42b64a42011-05-19 12:12:49 +00004195 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4196 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197
Matt Carlson42b64a42011-05-19 12:12:49 +00004198 tw32(TG3_CPMU_EEE_MODE,
4199 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08004200
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00004201 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
Matt Carlson42b64a42011-05-19 12:12:49 +00004202 if (!err) {
4203 u32 err2;
Matt Carlson52b02d02010-10-14 10:37:41 +00004204
Matt Carlsona6b68da2010-12-06 08:28:52 +00004205 val = 0;
Matt Carlson42b64a42011-05-19 12:12:49 +00004206 /* Advertise 100-BaseTX EEE ability */
4207 if (advertise & ADVERTISED_100baseT_Full)
4208 val |= MDIO_AN_EEE_ADV_100TX;
4209 /* Advertise 1000-BaseT EEE ability */
4210 if (advertise & ADVERTISED_1000baseT_Full)
4211 val |= MDIO_AN_EEE_ADV_1000T;
4212 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
Matt Carlsonb715ce92011-07-20 10:20:52 +00004213 if (err)
4214 val = 0;
4215
Joe Perches41535772013-02-16 11:20:04 +00004216 switch (tg3_asic_rev(tp)) {
Matt Carlsonb715ce92011-07-20 10:20:52 +00004217 case ASIC_REV_5717:
4218 case ASIC_REV_57765:
Matt Carlson55086ad2011-12-14 11:09:59 +00004219 case ASIC_REV_57766:
Matt Carlsonb715ce92011-07-20 10:20:52 +00004220 case ASIC_REV_5719:
4221 /* If we advertised any eee advertisements above... */
4222 if (val)
4223 val = MII_TG3_DSP_TAP26_ALNOKO |
4224 MII_TG3_DSP_TAP26_RMRXSTO |
4225 MII_TG3_DSP_TAP26_OPCSINPT;
4226 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4227 /* Fall through */
4228 case ASIC_REV_5720:
Michael Chanc65a17f2013-01-06 12:51:07 +00004229 case ASIC_REV_5762:
Matt Carlsonb715ce92011-07-20 10:20:52 +00004230 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4231 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4232 MII_TG3_DSP_CH34TP2_HIBW01);
4233 }
Matt Carlson52b02d02010-10-14 10:37:41 +00004234
Nithin Nayak Sujirdaf3ec62013-01-14 17:11:00 +00004235 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
Matt Carlson42b64a42011-05-19 12:12:49 +00004236 if (!err)
4237 err = err2;
4238 }
4239
4240done:
4241 return err;
4242}
4243
/* Start link negotiation on a copper PHY.
 *
 * Autoneg (or low-power) path: build the advertisement — restricted to
 * 10 Mb (plus 100 Mb if WoL-at-100 is set) when in low power — program
 * it, and restart autonegotiation.
 *
 * Forced path: translate link_config.speed/duplex into a BMCR value
 * and, if it differs from what the PHY currently holds, briefly force
 * loopback to drop the link before writing the new BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low power: advertise only the slow modes
			 * needed for wake-up.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		/* Forced mode: the requested settings become active
		 * immediately, without waiting for negotiation.
		 */
		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force loopback and poll (up to ~15 ms) for the
			 * link to actually drop before applying the new
			 * forced settings.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; latched bits need a
				 * second read to reflect current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4316
4317static int tg3_init_5401phy_dsp(struct tg3 *tp)
4318{
4319 int err;
4320
4321 /* Turn off tap power management. */
4322 /* Set Extended packet length bit */
Matt Carlsonb4bd2922011-04-20 07:57:41 +00004323 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324
Matt Carlson6ee7c0a2010-08-02 11:26:04 +00004325 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4326 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4327 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4328 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4329 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330
4331 udelay(40);
4332
4333 return err;
4334}
4335
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004336static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337{
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004338 u32 advmsk, tgtadv, advertising;
Michael Chan3600d912006-12-07 00:21:48 -08004339
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004340 advertising = tp->link_config.advertising;
4341 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004343 advmsk = ADVERTISE_ALL;
4344 if (tp->link_config.active_duplex == DUPLEX_FULL) {
Matt Carlsonf88788f2011-12-14 11:10:00 +00004345 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004346 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4347 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004349 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4350 return false;
4351
4352 if ((*lcladv & advmsk) != tgtadv)
4353 return false;
Matt Carlsonb99d2a52011-08-31 11:44:47 +00004354
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004355 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 u32 tg3_ctrl;
4357
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004358 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
Michael Chan3600d912006-12-07 00:21:48 -08004359
Matt Carlson221c5632011-06-13 13:39:01 +00004360 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004361 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362
Matt Carlson3198e072012-02-13 15:20:10 +00004363 if (tgtadv &&
Joe Perches41535772013-02-16 11:20:04 +00004364 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4365 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
Matt Carlson3198e072012-02-13 15:20:10 +00004366 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4367 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4368 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4369 } else {
4370 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4371 }
4372
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004373 if (tg3_ctrl != tgtadv)
4374 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375 }
Matt Carlson93a700a2011-08-31 11:44:54 +00004376
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004377 return true;
Matt Carlsonef167e22007-12-20 20:10:01 -08004378}
4379
Matt Carlson859edb22011-12-08 14:40:16 +00004380static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4381{
4382 u32 lpeth = 0;
4383
4384 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4385 u32 val;
4386
4387 if (tg3_readphy(tp, MII_STAT1000, &val))
4388 return false;
4389
4390 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4391 }
4392
4393 if (tg3_readphy(tp, MII_LPA, rmtadv))
4394 return false;
4395
4396 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4397 tp->link_config.rmt_adv = lpeth;
4398
4399 return true;
4400}
4401
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00004402static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4403{
4404 if (curr_link_up != tp->link_up) {
4405 if (curr_link_up) {
4406 tg3_carrier_on(tp);
4407 } else {
4408 tg3_carrier_off(tp);
4409 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4410 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4411 }
4412
4413 tg3_link_report(tp);
4414 return true;
4415 }
4416
4417 return false;
4418}
4419
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4421{
4422 int current_link_up;
Matt Carlsonf833c4c2010-09-15 09:00:01 +00004423 u32 bmsr, val;
Matt Carlsonef167e22007-12-20 20:10:01 -08004424 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 u16 current_speed;
4426 u8 current_duplex;
4427 int i, err;
4428
4429 tw32(MAC_EVENT, 0);
4430
4431 tw32_f(MAC_STATUS,
4432 (MAC_STATUS_SYNC_CHANGED |
4433 MAC_STATUS_CFG_CHANGED |
4434 MAC_STATUS_MI_COMPLETION |
4435 MAC_STATUS_LNKSTATE_CHANGED));
4436 udelay(40);
4437
Matt Carlson8ef21422008-05-02 16:47:53 -07004438 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4439 tw32_f(MAC_MI_MODE,
4440 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4441 udelay(80);
4442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443
Matt Carlsonb4bd2922011-04-20 07:57:41 +00004444 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445
4446 /* Some third-party PHYs need to be reset on link going
4447 * down.
4448 */
Joe Perches41535772013-02-16 11:20:04 +00004449 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4450 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4451 tg3_asic_rev(tp) == ASIC_REV_5705) &&
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00004452 tp->link_up) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 tg3_readphy(tp, MII_BMSR, &bmsr);
4454 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4455 !(bmsr & BMSR_LSTATUS))
4456 force_reset = 1;
4457 }
4458 if (force_reset)
4459 tg3_phy_reset(tp);
4460
Matt Carlson79eb6902010-02-17 15:17:03 +00004461 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 tg3_readphy(tp, MII_BMSR, &bmsr);
4463 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
Joe Perches63c3a662011-04-26 08:12:10 +00004464 !tg3_flag(tp, INIT_COMPLETE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 bmsr = 0;
4466
4467 if (!(bmsr & BMSR_LSTATUS)) {
4468 err = tg3_init_5401phy_dsp(tp);
4469 if (err)
4470 return err;
4471
4472 tg3_readphy(tp, MII_BMSR, &bmsr);
4473 for (i = 0; i < 1000; i++) {
4474 udelay(10);
4475 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4476 (bmsr & BMSR_LSTATUS)) {
4477 udelay(40);
4478 break;
4479 }
4480 }
4481
Matt Carlson79eb6902010-02-17 15:17:03 +00004482 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4483 TG3_PHY_REV_BCM5401_B0 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484 !(bmsr & BMSR_LSTATUS) &&
4485 tp->link_config.active_speed == SPEED_1000) {
4486 err = tg3_phy_reset(tp);
4487 if (!err)
4488 err = tg3_init_5401phy_dsp(tp);
4489 if (err)
4490 return err;
4491 }
4492 }
Joe Perches41535772013-02-16 11:20:04 +00004493 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4494 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 /* 5701 {A0,B0} CRC bug workaround */
4496 tg3_writephy(tp, 0x15, 0x0a75);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +00004497 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4498 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4499 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500 }
4501
4502 /* Clear pending interrupts... */
Matt Carlsonf833c4c2010-09-15 09:00:01 +00004503 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4504 tg3_readphy(tp, MII_TG3_ISTAT, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004506 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004508 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4510
Joe Perches41535772013-02-16 11:20:04 +00004511 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4512 tg3_asic_rev(tp) == ASIC_REV_5701) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4514 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4515 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4516 else
4517 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4518 }
4519
4520 current_link_up = 0;
Matt Carlsone7405222012-02-13 15:20:16 +00004521 current_speed = SPEED_UNKNOWN;
4522 current_duplex = DUPLEX_UNKNOWN;
Matt Carlsone348c5e2011-11-21 15:01:20 +00004523 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
Matt Carlson859edb22011-12-08 14:40:16 +00004524 tp->link_config.rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004526 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
Matt Carlson15ee95c2011-04-20 07:57:40 +00004527 err = tg3_phy_auxctl_read(tp,
4528 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4529 &val);
4530 if (!err && !(val & (1 << 10))) {
Matt Carlsonb4bd2922011-04-20 07:57:41 +00004531 tg3_phy_auxctl_write(tp,
4532 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4533 val | (1 << 10));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534 goto relink;
4535 }
4536 }
4537
4538 bmsr = 0;
4539 for (i = 0; i < 100; i++) {
4540 tg3_readphy(tp, MII_BMSR, &bmsr);
4541 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4542 (bmsr & BMSR_LSTATUS))
4543 break;
4544 udelay(40);
4545 }
4546
4547 if (bmsr & BMSR_LSTATUS) {
4548 u32 aux_stat, bmcr;
4549
4550 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4551 for (i = 0; i < 2000; i++) {
4552 udelay(10);
4553 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4554 aux_stat)
4555 break;
4556 }
4557
4558 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4559 &current_speed,
4560 &current_duplex);
4561
4562 bmcr = 0;
4563 for (i = 0; i < 200; i++) {
4564 tg3_readphy(tp, MII_BMCR, &bmcr);
4565 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4566 continue;
4567 if (bmcr && bmcr != 0x7fff)
4568 break;
4569 udelay(10);
4570 }
4571
Matt Carlsonef167e22007-12-20 20:10:01 -08004572 lcl_adv = 0;
4573 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574
Matt Carlsonef167e22007-12-20 20:10:01 -08004575 tp->link_config.active_speed = current_speed;
4576 tp->link_config.active_duplex = current_duplex;
4577
4578 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4579 if ((bmcr & BMCR_ANENABLE) &&
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004580 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
Matt Carlson859edb22011-12-08 14:40:16 +00004581 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
Matt Carlsone2bf73e2011-12-08 14:40:15 +00004582 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583 } else {
4584 if (!(bmcr & BMCR_ANENABLE) &&
4585 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08004586 tp->link_config.duplex == current_duplex &&
4587 tp->link_config.flowctrl ==
4588 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004590 }
4591 }
4592
Matt Carlsonef167e22007-12-20 20:10:01 -08004593 if (current_link_up == 1 &&
Matt Carlsone348c5e2011-11-21 15:01:20 +00004594 tp->link_config.active_duplex == DUPLEX_FULL) {
4595 u32 reg, bit;
4596
4597 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4598 reg = MII_TG3_FET_GEN_STAT;
4599 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4600 } else {
4601 reg = MII_TG3_EXT_STAT;
4602 bit = MII_TG3_EXT_STAT_MDIX;
4603 }
4604
4605 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4606 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4607
Matt Carlsonef167e22007-12-20 20:10:01 -08004608 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Matt Carlsone348c5e2011-11-21 15:01:20 +00004609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 }
4611
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612relink:
Matt Carlson80096062010-08-02 11:26:06 +00004613 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614 tg3_phy_copper_begin(tp);
4615
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +00004616 if (tg3_flag(tp, ROBOSWITCH)) {
4617 current_link_up = 1;
4618 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4619 current_speed = SPEED_1000;
4620 current_duplex = DUPLEX_FULL;
4621 tp->link_config.active_speed = current_speed;
4622 tp->link_config.active_duplex = current_duplex;
4623 }
4624
Matt Carlsonf833c4c2010-09-15 09:00:01 +00004625 tg3_readphy(tp, MII_BMSR, &bmsr);
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00004626 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4627 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 current_link_up = 1;
4629 }
4630
4631 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4632 if (current_link_up == 1) {
4633 if (tp->link_config.active_speed == SPEED_100 ||
4634 tp->link_config.active_speed == SPEED_10)
4635 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4636 else
4637 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
Matt Carlsonf07e9af2010-08-02 11:26:07 +00004638 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
Matt Carlson7f97a4b2009-08-25 10:10:03 +00004639 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4640 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4642
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +00004643 /* In order for the 5750 core in BCM4785 chip to work properly
4644 * in RGMII mode, the Led Control Register must be set up.
4645 */
4646 if (tg3_flag(tp, RGMII_MODE)) {
4647 u32 led_ctrl = tr32(MAC_LED_CTRL);
4648 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4649
4650 if (tp->link_config.active_speed == SPEED_10)
4651 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4652 else if (tp->link_config.active_speed == SPEED_100)
4653 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4654 LED_CTRL_100MBPS_ON);
4655 else if (tp->link_config.active_speed == SPEED_1000)
4656 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4657 LED_CTRL_1000MBPS_ON);
4658
4659 tw32(MAC_LED_CTRL, led_ctrl);
4660 udelay(40);
4661 }
4662
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4664 if (tp->link_config.active_duplex == DUPLEX_HALF)
4665 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4666
Joe Perches41535772013-02-16 11:20:04 +00004667 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07004668 if (current_link_up == 1 &&
4669 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07004671 else
4672 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 }
4674
4675 /* ??? Without this setting Netgear GA302T PHY does not
4676 * ??? send/receive packets...
4677 */
Matt Carlson79eb6902010-02-17 15:17:03 +00004678 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
Joe Perches41535772013-02-16 11:20:04 +00004679 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4681 tw32_f(MAC_MI_MODE, tp->mi_mode);
4682 udelay(80);
4683 }
4684
4685 tw32_f(MAC_MODE, tp->mac_mode);
4686 udelay(40);
4687
Matt Carlson52b02d02010-10-14 10:37:41 +00004688 tg3_phy_eee_adjust(tp, current_link_up);
4689
Joe Perches63c3a662011-04-26 08:12:10 +00004690 if (tg3_flag(tp, USE_LINKCHG_REG)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004691 /* Polled via timer. */
4692 tw32_f(MAC_EVENT, 0);
4693 } else {
4694 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4695 }
4696 udelay(40);
4697
Joe Perches41535772013-02-16 11:20:04 +00004698 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 current_link_up == 1 &&
4700 tp->link_config.active_speed == SPEED_1000 &&
Joe Perches63c3a662011-04-26 08:12:10 +00004701 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702 udelay(120);
4703 tw32_f(MAC_STATUS,
4704 (MAC_STATUS_SYNC_CHANGED |
4705 MAC_STATUS_CFG_CHANGED));
4706 udelay(40);
4707 tg3_write_mem(tp,
4708 NIC_SRAM_FIRMWARE_MBOX,
4709 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4710 }
4711
Matt Carlson5e7dfd02008-11-21 17:18:16 -08004712 /* Prevent send BD corruption. */
Joe Perches63c3a662011-04-26 08:12:10 +00004713 if (tg3_flag(tp, CLKREQ_BUG)) {
Matt Carlson5e7dfd02008-11-21 17:18:16 -08004714 if (tp->link_config.active_speed == SPEED_100 ||
4715 tp->link_config.active_speed == SPEED_10)
Jiang Liu0f49bfb2012-08-20 13:28:20 -06004716 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4717 PCI_EXP_LNKCTL_CLKREQ_EN);
Matt Carlson5e7dfd02008-11-21 17:18:16 -08004718 else
Jiang Liu0f49bfb2012-08-20 13:28:20 -06004719 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4720 PCI_EXP_LNKCTL_CLKREQ_EN);
Matt Carlson5e7dfd02008-11-21 17:18:16 -08004721 }
4722
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00004723 tg3_test_and_report_link_chg(tp, current_link_up);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724
4725 return 0;
4726}
4727
/* Bookkeeping for the software-driven fiber (1000BASE-X) autonegotiation
 * state machine implemented in tg3_fiber_aneg_smachine().  The MR_ flag
 * names mirror the management-register variables of the IEEE 802.3
 * clause 37 arbitration process.  NOTE(review): the clause-37
 * correspondence is inferred from the constant names - confirm against
 * the spec.
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* control/status bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters, in state-machine ticks (not jiffies): cur_time
	 * advances once per tg3_fiber_aneg_smachine() call; link_time
	 * records the tick at which the current state was entered.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word sampled from MAC */
	int ability_match_count;	/* consecutive identical samples */

	/* Set while the partner's config word is stable / idle / acked */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* raw config words sent / received */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine() */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must settle for before the machine advances */
#define ANEG_STATE_SETTLE_TIME	10000
4791
/* Advance the software fiber autonegotiation state machine by one tick.
 *
 * First samples the received config word from the MAC (MAC_RX_AUTO_NEG)
 * to update the ability/ack/idle match tracking in @ap, then executes
 * one step of the arbitration state machine, programming
 * MAC_TX_AUTO_NEG and MAC_MODE as the new state requires.
 *
 * Returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is still in
 * progress, ANEG_DONE when it has resolved, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First tick: start from a clean slate. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* A config word must be seen at least twice in a row
		 * before it counts as the partner's stable ability.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words arriving: link is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word while restarting. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner changed its word: renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			/* Reserved bits set in the received word. */
			ret = ANEG_FAILED;
			break;
		}
		/* Translate the partner's config word into MR_LP_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008 appears to be the toggle bit of the received
		 * config word (magic value, kept as-is).
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see the states
					 * below), so fail.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5043
Matt Carlson5be73b42007-12-20 20:09:29 -08005044static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045{
5046 int res = 0;
5047 struct tg3_fiber_aneginfo aninfo;
5048 int status = ANEG_FAILED;
5049 unsigned int tick;
5050 u32 tmp;
5051
5052 tw32_f(MAC_TX_AUTO_NEG, 0);
5053
5054 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5055 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5056 udelay(40);
5057
5058 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5059 udelay(40);
5060
5061 memset(&aninfo, 0, sizeof(aninfo));
5062 aninfo.flags |= MR_AN_ENABLE;
5063 aninfo.state = ANEG_STATE_UNKNOWN;
5064 aninfo.cur_time = 0;
5065 tick = 0;
5066 while (++tick < 195000) {
5067 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5068 if (status == ANEG_DONE || status == ANEG_FAILED)
5069 break;
5070
5071 udelay(1);
5072 }
5073
5074 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5075 tw32_f(MAC_MODE, tp->mac_mode);
5076 udelay(40);
5077
Matt Carlson5be73b42007-12-20 20:09:29 -08005078 *txflags = aninfo.txconfig;
5079 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080
5081 if (status == ANEG_DONE &&
5082 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5083 MR_LP_ADV_FULL_DUPLEX)))
5084 res = 1;
5085
5086 return res;
5087}
5088
/* Bring up the BCM8002 SerDes PHY with its vendor-specific MII
 * register sequence.  The raw register numbers and values below are
 * opaque magic from Broadcom; the inline comments are the only record
 * of their intent.  The busy-wait loops stand in for proper sleeps
 * (see the XXX notes) because this can run in a non-sleepable context
 * -- NOTE(review): context assumption inferred from the XXX comments,
 * confirm against callers.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5138
/* Drive the on-chip SG-DIG hardware autonegotiation block for fiber
 * links.  Returns nonzero if the link came up.
 *
 * Forced (non-autoneg) configs tear the HW autoneg block down and let
 * link follow PCS sync.  For autoneg, SG_DIG_CTRL is programmed with
 * the wanted advertisement; once the block reports completion the
 * partner's pause bits are folded into flow control.  If autoneg does
 * not complete before serdes_counter expires, we fall back to parallel
 * detection: link is declared up when PCS is synced and no config
 * words are being received.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* The serdes-config workaround applies to everything except
	 * the two earliest 5704 steppings.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable the HW autoneg block if it was on. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* 0xc010000 / 0x4010000: vendor-magic
				 * serdes values for port A / port B.
				 */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link alive while the
		 * detection timer is still running and we have PCS
		 * sync with no incoming config words.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset into the autoneg block, then start it. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: try parallel detect. */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5283
/* Fiber link setup for chips without the SG-DIG hardware autoneg
 * block: either run the software autoneg state machine via
 * fiber_autoneg() or force a 1000-full link.  Returns nonzero if the
 * link is up.  Requires PCS sync to even attempt link.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the exchanged config words into
			 * MII-style pause advertisements for the flow
			 * control resolution code.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change bits until they stay clear
		 * (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we are synced and see no config
		 * words: treat as link up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS off again to complete the force. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5348
/* Top-level link setup for TBI (fiber) ports.  Dispatches to the
 * hardware autoneg path (tg3_setup_fiber_hw_autoneg) or the software
 * path (tg3_setup_fiber_by_hand), then updates speed/duplex, LEDs and
 * link-change reporting.  Always returns 0.
 *
 * @force_reset is accepted for signature parity with the other
 * tg3_setup_*_phy() variants but is not used on this path.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot the pre-setup state so we only report a change when
	 * something actually differs afterwards.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, link already up, init complete,
	 * and MAC status shows a clean synced link with no config
	 * words -- just ack the change bits and keep the link.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the port into TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config change bits until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg idle and no sync: pulse SEND_CONFIGS to try
		 * to provoke the partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber is always 1000 full duplex when up. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state did not change, still report when
	 * flow control, speed or duplex did.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5451
/* Link setup for SerDes ports managed through MII registers (the
 * 5714/5780-class parts referenced below).  Mirrors the copper flow:
 * read BMSR/BMCR, program the 1000BASE-X advertisement for autoneg or
 * force BMCR for fixed mode, then derive speed/duplex and flow
 * control from the result.  Returns the OR-accumulated tg3_readphy()
 * error status.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending link-related status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR is read twice: link status is latched-low, so the
	 * second read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714, trust the MAC's TX status for link, not BMSR. */
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and come back later.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the wanted BMCR. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Drop all 1000BASE-X ability bits and
				 * restart autoneg so the partner sees
				 * the link go down.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low link status: read BMSR twice. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common abilities. */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5614
/* Poll for 1000BASE-X parallel detection on MII-serdes devices.
 *
 * Called periodically while autoneg is enabled.  Two directions:
 *  - link down: if we see signal detect but no config code words, the
 *    partner is not autonegotiating; force 1000/FD and flag that link
 *    came up via parallel detect.
 *  - link up via parallel detect: if config code words start arriving,
 *    the partner began autonegotiating; re-enable autoneg and clear
 *    the parallel-detect flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read: first read clears latched status,
			 * second returns current state — presumably; the
			 * duplicated read is deliberate, do not remove.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5674
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5676{
Matt Carlsonf2096f92011-04-05 14:22:48 +00005677 u32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 int err;
5679
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005680 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005681 err = tg3_setup_fiber_phy(tp, force_reset);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00005682 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
Michael Chan747e8f82005-07-25 12:33:22 -07005683 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Matt Carlson859a588792010-04-05 10:19:28 +00005684 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685 err = tg3_setup_copper_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686
Joe Perches41535772013-02-16 11:20:04 +00005687 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
Matt Carlsonf2096f92011-04-05 14:22:48 +00005688 u32 scale;
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08005689
5690 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5691 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5692 scale = 65;
5693 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5694 scale = 6;
5695 else
5696 scale = 12;
5697
5698 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5699 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5700 tw32(GRC_MISC_CFG, val);
5701 }
5702
Matt Carlsonf2096f92011-04-05 14:22:48 +00005703 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5704 (6 << TX_LENGTHS_IPG_SHIFT);
Joe Perches41535772013-02-16 11:20:04 +00005705 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5706 tg3_asic_rev(tp) == ASIC_REV_5762)
Matt Carlsonf2096f92011-04-05 14:22:48 +00005707 val |= tr32(MAC_TX_LENGTHS) &
5708 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5709 TX_LENGTHS_CNT_DWN_VAL_MSK);
5710
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 if (tp->link_config.active_speed == SPEED_1000 &&
5712 tp->link_config.active_duplex == DUPLEX_HALF)
Matt Carlsonf2096f92011-04-05 14:22:48 +00005713 tw32(MAC_TX_LENGTHS, val |
5714 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715 else
Matt Carlsonf2096f92011-04-05 14:22:48 +00005716 tw32(MAC_TX_LENGTHS, val |
5717 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005718
Joe Perches63c3a662011-04-26 08:12:10 +00005719 if (!tg3_flag(tp, 5705_PLUS)) {
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00005720 if (tp->link_up) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07005722 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005723 } else {
5724 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5725 }
5726 }
5727
Joe Perches63c3a662011-04-26 08:12:10 +00005728 if (tg3_flag(tp, ASPM_WORKAROUND)) {
Matt Carlsonf2096f92011-04-05 14:22:48 +00005729 val = tr32(PCIE_PWR_MGMT_THRESH);
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00005730 if (!tp->link_up)
Matt Carlson8ed5d972007-05-07 00:25:49 -07005731 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5732 tp->pwrmgmt_thresh;
5733 else
5734 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5735 tw32(PCIE_PWR_MGMT_THRESH, val);
5736 }
5737
Linus Torvalds1da177e2005-04-16 15:20:36 -07005738 return err;
5739}
5740
Matt Carlsonbe947302012-12-03 19:36:57 +00005741/* tp->lock must be held */
Matt Carlson7d41e492012-12-03 19:36:58 +00005742static u64 tg3_refclk_read(struct tg3 *tp)
5743{
5744 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5745 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5746}
5747
5748/* tp->lock must be held */
Matt Carlsonbe947302012-12-03 19:36:57 +00005749static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5750{
5751 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5752 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5753 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5754 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5755}
5756
Matt Carlson7d41e492012-12-03 19:36:58 +00005757static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5758static inline void tg3_full_unlock(struct tg3 *tp);
5759static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5760{
5761 struct tg3 *tp = netdev_priv(dev);
5762
5763 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5764 SOF_TIMESTAMPING_RX_SOFTWARE |
5765 SOF_TIMESTAMPING_SOFTWARE |
5766 SOF_TIMESTAMPING_TX_HARDWARE |
5767 SOF_TIMESTAMPING_RX_HARDWARE |
5768 SOF_TIMESTAMPING_RAW_HARDWARE;
5769
5770 if (tp->ptp_clock)
5771 info->phc_index = ptp_clock_index(tp->ptp_clock);
5772 else
5773 info->phc_index = -1;
5774
5775 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5776
5777 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5778 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5779 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5780 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5781 return 0;
5782}
5783
5784static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5785{
5786 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5787 bool neg_adj = false;
5788 u32 correction = 0;
5789
5790 if (ppb < 0) {
5791 neg_adj = true;
5792 ppb = -ppb;
5793 }
5794
5795 /* Frequency adjustment is performed using hardware with a 24 bit
5796 * accumulator and a programmable correction value. On each clk, the
5797 * correction value gets added to the accumulator and when it
5798 * overflows, the time counter is incremented/decremented.
5799 *
5800 * So conversion from ppb to correction value is
5801 * ppb * (1 << 24) / 1000000000
5802 */
5803 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5804 TG3_EAV_REF_CLK_CORRECT_MASK;
5805
5806 tg3_full_lock(tp, 0);
5807
5808 if (correction)
5809 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5810 TG3_EAV_REF_CLK_CORRECT_EN |
5811 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5812 else
5813 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5814
5815 tg3_full_unlock(tp);
5816
5817 return 0;
5818}
5819
/* PTP adjtime callback: shift the reported clock by @delta nanoseconds.
 * Implemented purely in software via the ptp_adjust offset; the hardware
 * counter itself is not touched.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
5830
5831static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5832{
5833 u64 ns;
5834 u32 remainder;
5835 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5836
5837 tg3_full_lock(tp, 0);
5838 ns = tg3_refclk_read(tp);
5839 ns += tp->ptp_adjust;
5840 tg3_full_unlock(tp);
5841
5842 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5843 ts->tv_nsec = remainder;
5844
5845 return 0;
5846}
5847
5848static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5849 const struct timespec *ts)
5850{
5851 u64 ns;
5852 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5853
5854 ns = timespec_to_ns(ts);
5855
5856 tg3_full_lock(tp, 0);
5857 tg3_refclk_write(tp, ns);
5858 tp->ptp_adjust = 0;
5859 tg3_full_unlock(tp);
5860
5861 return 0;
5862}
5863
/* PTP enable callback: ancillary features (alarms, external timestamps,
 * periodic outputs, PPS) are not supported on this hardware.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5869
/* Capabilities/ops template copied into tp->ptp_info at tg3_ptp_init()
 * time.  No ancillary pins/alarms; max frequency adjustment 250 ppm * 1000.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5884
Matt Carlsonfb4ce8a2012-12-03 19:37:00 +00005885static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5886 struct skb_shared_hwtstamps *timestamp)
5887{
5888 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5889 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5890 tp->ptp_adjust);
5891}
5892
Matt Carlsonbe947302012-12-03 19:36:57 +00005893/* tp->lock must be held */
5894static void tg3_ptp_init(struct tg3 *tp)
5895{
5896 if (!tg3_flag(tp, PTP_CAPABLE))
5897 return;
5898
5899 /* Initialize the hardware clock to the system time. */
5900 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5901 tp->ptp_adjust = 0;
Matt Carlson7d41e492012-12-03 19:36:58 +00005902 tp->ptp_info = tg3_ptp_caps;
Matt Carlsonbe947302012-12-03 19:36:57 +00005903}
5904
/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Reload the hardware clock with system time plus the pre-suspend
	 * software offset, then fold that offset into the hardware count.
	 */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
5914
5915static void tg3_ptp_fini(struct tg3 *tp)
5916{
5917 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5918 return;
5919
Matt Carlson7d41e492012-12-03 19:36:58 +00005920 ptp_clock_unregister(tp->ptp_clock);
Matt Carlsonbe947302012-12-03 19:36:57 +00005921 tp->ptp_clock = NULL;
5922 tp->ptp_adjust = 0;
5923}
5924
Matt Carlson66cfd1b2010-09-30 10:34:30 +00005925static inline int tg3_irq_sync(struct tg3 *tp)
5926{
5927 return tp->irq_sync;
5928}
5929
Matt Carlson97bd8e42011-04-13 11:05:04 +00005930static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5931{
5932 int i;
5933
5934 dst = (u32 *)((u8 *)dst + off);
5935 for (i = 0; i < len; i += sizeof(u32))
5936 *dst++ = tr32(off + i);
5937}
5938
/* Snapshot the interesting register ranges of a non-PCIe device into
 * @regs (a TG3_REG_BLK_SIZE buffer); each range lands at its own register
 * offset within the buffer.  Ranges that depend on optional features
 * (MSI-X vectors, TX CPU, NVRAM) are gated on the matching flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5988
/* Debug dump on fatal errors: log non-zero register words plus the
 * hardware and NAPI status for every interrupt vector.  GFP_ATOMIC is
 * used because this can run from interrupt/timer context; on allocation
 * failure the register dump is silently skipped.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero lines. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6044
Michael Chandf3e6542006-05-26 17:48:07 -07006045/* This is called whenever we suspect that the system chipset is re-
6046 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6047 * is bogus tx completions. We try to recover by setting the
6048 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6049 * in the workqueue.
6050 */
6051static void tg3_tx_recover(struct tg3 *tp)
6052{
Joe Perches63c3a662011-04-26 08:12:10 +00006053 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
Michael Chandf3e6542006-05-26 17:48:07 -07006054 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6055
Matt Carlson5129c3a2010-04-05 10:19:23 +00006056 netdev_warn(tp->dev,
6057 "The system may be re-ordering memory-mapped I/O "
6058 "cycles to the network device, attempting to recover. "
6059 "Please report the problem to the driver maintainer "
6060 "and include system chipset information.\n");
Michael Chandf3e6542006-05-26 17:48:07 -07006061
6062 spin_lock(&tp->lock);
Joe Perches63c3a662011-04-26 08:12:10 +00006063 tg3_flag_set(tp, TX_RECOVERY_PENDING);
Michael Chandf3e6542006-05-26 17:48:07 -07006064 spin_unlock(&tp->lock);
6065}
6066
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006067static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
Michael Chan1b2a7202006-08-07 21:46:02 -07006068{
Matt Carlsonf65aac12010-08-02 11:26:03 +00006069 /* Tell compiler to fetch tx indices from memory. */
6070 barrier();
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006071 return tnapi->tx_pending -
6072 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
Michael Chan1b2a7202006-08-07 21:46:02 -07006073}
6074
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075/* Tigon3 never reports partial packet sends. So we do not
6076 * need special logic to handle SKBs that have not had all
6077 * of their frags sent yet, like SunGEM does.
6078 */
Matt Carlson17375d22009-08-28 14:02:18 +00006079static void tg3_tx(struct tg3_napi *tnapi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080{
Matt Carlson17375d22009-08-28 14:02:18 +00006081 struct tg3 *tp = tnapi->tp;
Matt Carlson898a56f2009-08-28 14:02:40 +00006082 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006083 u32 sw_idx = tnapi->tx_cons;
Matt Carlsonfe5f5782009-09-01 13:09:39 +00006084 struct netdev_queue *txq;
6085 int index = tnapi - tp->napi;
Tom Herbert298376d2011-11-28 16:33:30 +00006086 unsigned int pkts_compl = 0, bytes_compl = 0;
Matt Carlsonfe5f5782009-09-01 13:09:39 +00006087
Joe Perches63c3a662011-04-26 08:12:10 +00006088 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonfe5f5782009-09-01 13:09:39 +00006089 index--;
6090
6091 txq = netdev_get_tx_queue(tp->dev, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006092
6093 while (sw_idx != hw_idx) {
Matt Carlsondf8944c2011-07-27 14:20:46 +00006094 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006095 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07006096 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006097
Michael Chandf3e6542006-05-26 17:48:07 -07006098 if (unlikely(skb == NULL)) {
6099 tg3_tx_recover(tp);
6100 return;
6101 }
6102
Matt Carlsonfb4ce8a2012-12-03 19:37:00 +00006103 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6104 struct skb_shared_hwtstamps timestamp;
6105 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6106 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6107
6108 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6109
6110 skb_tstamp_tx(skb, &timestamp);
6111 }
6112
Alexander Duyckf4188d82009-12-02 16:48:38 +00006113 pci_unmap_single(tp->pdev,
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00006114 dma_unmap_addr(ri, mapping),
Alexander Duyckf4188d82009-12-02 16:48:38 +00006115 skb_headlen(skb),
6116 PCI_DMA_TODEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006117
6118 ri->skb = NULL;
6119
Matt Carlsone01ee142011-07-27 14:20:50 +00006120 while (ri->fragmented) {
6121 ri->fragmented = false;
6122 sw_idx = NEXT_TX(sw_idx);
6123 ri = &tnapi->tx_buffers[sw_idx];
6124 }
6125
Linus Torvalds1da177e2005-04-16 15:20:36 -07006126 sw_idx = NEXT_TX(sw_idx);
6127
6128 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006129 ri = &tnapi->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07006130 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6131 tx_bug = 1;
Alexander Duyckf4188d82009-12-02 16:48:38 +00006132
6133 pci_unmap_page(tp->pdev,
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00006134 dma_unmap_addr(ri, mapping),
Eric Dumazet9e903e02011-10-18 21:00:24 +00006135 skb_frag_size(&skb_shinfo(skb)->frags[i]),
Alexander Duyckf4188d82009-12-02 16:48:38 +00006136 PCI_DMA_TODEVICE);
Matt Carlsone01ee142011-07-27 14:20:50 +00006137
6138 while (ri->fragmented) {
6139 ri->fragmented = false;
6140 sw_idx = NEXT_TX(sw_idx);
6141 ri = &tnapi->tx_buffers[sw_idx];
6142 }
6143
Linus Torvalds1da177e2005-04-16 15:20:36 -07006144 sw_idx = NEXT_TX(sw_idx);
6145 }
6146
Tom Herbert298376d2011-11-28 16:33:30 +00006147 pkts_compl++;
6148 bytes_compl += skb->len;
6149
David S. Millerf47c11e2005-06-24 20:18:35 -07006150 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07006151
6152 if (unlikely(tx_bug)) {
6153 tg3_tx_recover(tp);
6154 return;
6155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006156 }
6157
Tom Herbert5cb917b2012-03-05 19:53:50 +00006158 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
Tom Herbert298376d2011-11-28 16:33:30 +00006159
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006160 tnapi->tx_cons = sw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006161
Michael Chan1b2a7202006-08-07 21:46:02 -07006162 /* Need to make the tx_cons update visible to tg3_start_xmit()
6163 * before checking for netif_queue_stopped(). Without the
6164 * memory barrier, there is a small possibility that tg3_start_xmit()
6165 * will miss it and cause the queue to be stopped forever.
6166 */
6167 smp_mb();
6168
Matt Carlsonfe5f5782009-09-01 13:09:39 +00006169 if (unlikely(netif_tx_queue_stopped(txq) &&
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006170 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
Matt Carlsonfe5f5782009-09-01 13:09:39 +00006171 __netif_tx_lock(txq, smp_processor_id());
6172 if (netif_tx_queue_stopped(txq) &&
Matt Carlsonf3f3f272009-08-28 14:03:21 +00006173 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
Matt Carlsonfe5f5782009-09-01 13:09:39 +00006174 netif_tx_wake_queue(txq);
6175 __netif_tx_unlock(txq);
Michael Chan51b91462005-09-01 17:41:28 -07006176 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006177}
6178
Eric Dumazet8d4057a2012-04-27 00:34:49 +00006179static void tg3_frag_free(bool is_frag, void *data)
6180{
6181 if (is_frag)
6182 put_page(virt_to_head_page(data));
6183 else
6184 kfree(data);
6185}
6186
Eric Dumazet9205fd92011-11-18 06:47:01 +00006187static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
Matt Carlson2b2cdb62009-11-13 13:03:48 +00006188{
Eric Dumazet8d4057a2012-04-27 00:34:49 +00006189 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6190 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6191
Eric Dumazet9205fd92011-11-18 06:47:01 +00006192 if (!ri->data)
Matt Carlson2b2cdb62009-11-13 13:03:48 +00006193 return;
6194
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00006195 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
Matt Carlson2b2cdb62009-11-13 13:03:48 +00006196 map_sz, PCI_DMA_FROMDEVICE);
Eric Dumazeta1e8b3072012-05-18 21:33:39 +00006197 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
Eric Dumazet9205fd92011-11-18 06:47:01 +00006198 ri->data = NULL;
Matt Carlson2b2cdb62009-11-13 13:03:48 +00006199}
6200
Eric Dumazet8d4057a2012-04-27 00:34:49 +00006201
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202/* Returns size of skb allocated or < 0 on error.
6203 *
6204 * We only need to fill in the address because the other members
6205 * of the RX descriptor are invariant, see tg3_init_rings.
6206 *
6207 * Note the purposeful assymetry of cpu vs. chip accesses. For
6208 * posting buffers we only dirty the first cache line of the RX
6209 * descriptor (containing the address). Whereas for the RX status
6210 * buffers the cpu only reads the last cacheline of the RX descriptor
6211 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6212 */
Eric Dumazet9205fd92011-11-18 06:47:01 +00006213static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
Eric Dumazet8d4057a2012-04-27 00:34:49 +00006214 u32 opaque_key, u32 dest_idx_unmasked,
6215 unsigned int *frag_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006216{
6217 struct tg3_rx_buffer_desc *desc;
Matt Carlsonf94e2902010-10-14 10:37:42 +00006218 struct ring_info *map;
Eric Dumazet9205fd92011-11-18 06:47:01 +00006219 u8 *data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220 dma_addr_t mapping;
Eric Dumazet9205fd92011-11-18 06:47:01 +00006221 int skb_size, data_size, dest_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006222
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223 switch (opaque_key) {
6224 case RXD_OPAQUE_RING_STD:
Matt Carlson2c49a442010-09-30 10:34:35 +00006225 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
Matt Carlson21f581a2009-08-28 14:00:25 +00006226 desc = &tpr->rx_std[dest_idx];
6227 map = &tpr->rx_std_buffers[dest_idx];
Eric Dumazet9205fd92011-11-18 06:47:01 +00006228 data_size = tp->rx_pkt_map_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229 break;
6230
6231 case RXD_OPAQUE_RING_JUMBO:
Matt Carlson2c49a442010-09-30 10:34:35 +00006232 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
Matt Carlson79ed5ac2009-08-28 14:00:55 +00006233 desc = &tpr->rx_jmb[dest_idx].std;
Matt Carlson21f581a2009-08-28 14:00:25 +00006234 map = &tpr->rx_jmb_buffers[dest_idx];
Eric Dumazet9205fd92011-11-18 06:47:01 +00006235 data_size = TG3_RX_JMB_MAP_SZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236 break;
6237
6238 default:
6239 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07006240 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006241
6242 /* Do not overwrite any of the map or rp information
6243 * until we are sure we can commit to a new buffer.
6244 *
6245 * Callers depend upon this behavior and assume that
6246 * we leave everything unchanged if we fail.
6247 */
Eric Dumazet9205fd92011-11-18 06:47:01 +00006248 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Eric Dumazeta1e8b3072012-05-18 21:33:39 +00006250 if (skb_size <= PAGE_SIZE) {
6251 data = netdev_alloc_frag(skb_size);
6252 *frag_size = skb_size;
Eric Dumazet8d4057a2012-04-27 00:34:49 +00006253 } else {
6254 data = kmalloc(skb_size, GFP_ATOMIC);
6255 *frag_size = 0;
6256 }
Eric Dumazet9205fd92011-11-18 06:47:01 +00006257 if (!data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006258 return -ENOMEM;
6259
Eric Dumazet9205fd92011-11-18 06:47:01 +00006260 mapping = pci_map_single(tp->pdev,
6261 data + TG3_RX_OFFSET(tp),
6262 data_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006263 PCI_DMA_FROMDEVICE);
Eric Dumazet8d4057a2012-04-27 00:34:49 +00006264 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
Eric Dumazeta1e8b3072012-05-18 21:33:39 +00006265 tg3_frag_free(skb_size <= PAGE_SIZE, data);
Matt Carlsona21771d2009-11-02 14:25:31 +00006266 return -EIO;
6267 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006268
Eric Dumazet9205fd92011-11-18 06:47:01 +00006269 map->data = data;
FUJITA Tomonori4e5e4f02010-04-12 14:32:09 +00006270 dma_unmap_addr_set(map, mapping, mapping);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006271
Linus Torvalds1da177e2005-04-16 15:20:36 -07006272 desc->addr_hi = ((u64)mapping >> 32);
6273 desc->addr_lo = ((u64)mapping & 0xffffffff);
6274
Eric Dumazet9205fd92011-11-18 06:47:01 +00006275 return data_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006276}
6277
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Moves an already-mapped buffer from the source producer ring slot
 * (always napi[0]'s prodring) into the destination ring slot, instead
 * of allocating/mapping a fresh buffer.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6327
Linus Torvalds1da177e2005-04-16 15:20:36 -07006328/* The RX ring scheme is composed of multiple rings which post fresh
6329 * buffers to the chip, and one special ring the chip uses to report
6330 * status back to the host.
6331 *
6332 * The special ring reports the status of received packets to the
6333 * host. The chip does not write into the original descriptor the
6334 * RX buffer was obtained from. The chip simply takes the original
6335 * descriptor as provided by the host, updates the status and length
6336 * field, then writes this into the next status ring entry.
6337 *
6338 * Each ring the host uses to post buffers to the chip is described
6339 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6340 * it is first placed into the on-chip ram. When the packet's length
6341 * is known, it walks down the TG3_BDINFO entries to select the ring.
6342 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6343 * which is within the range of the new packet's length is chosen.
6344 *
6345 * The "separate ring for rx status" scheme may sound queer, but it makes
6346 * sense from a cache coherency perspective. If only the host writes
6347 * to the buffer post rings, and only the chip writes to the rx status
6348 * rings, then cache lines never move beyond shared-modified state.
6349 * If both the host and chip were to write into the same ring, cache line
6350 * eviction could occur since both entities want it in an exclusive state.
6351 */
/* Service the RX return ring for one NAPI vector.
 *
 * Walks the status (return) ring from the driver's consumer index up to
 * the hardware's producer index, at most @budget packets.  For each
 * completed descriptor it either hands the buffer to the stack (large
 * frames, zero-copy via build_skb) or copies the payload into a fresh
 * skb and recycles the original buffer (small frames).  Consumed
 * producer ring slots are refilled and the new producer indices are
 * posted back to the chip.  Returns the number of packets received.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies which producer ring
		 * (standard or jumbo) this buffer came from and its
		 * index within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames, but tolerate the odd-nibble MII
		 * indication, which is not treated as a real error.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware receive timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large frame: replace the ring buffer with a
			 * freshly allocated one and hand the DMA'd data
			 * to the stack without copying.
			 */
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a new skb and recycle
			 * the original (still-mapped) ring buffer.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when RX checksum
		 * offload is enabled and the chip reports a valid
		 * TCP/UDP checksum result.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN tag
		 * (which legitimately adds to the frame length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post standard-ring buffers early if we have consumed
		 * a large batch, to keep the chip from running dry.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns the real hardware producer
		 * rings; kick it to transfer our per-vector refills.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6557
/* Check the status block for a link-change event and, if one is
 * pending, acknowledge it and re-run link setup under tp->lock.
 * Skipped entirely when the driver polls link state via the link
 * change register or serdes polling instead of the status block.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping the
			 * status block marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns the PHY: just ack the MAC
				 * status bits and let phylib handle it.
				 */
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6581
/* Transfer recycled RX buffers from a per-vector (source) producer
 * ring set @spr into the hardware-owned (destination) ring set @dpr.
 *
 * Runs lockless against the producer side: ordering between the
 * buffer-array updates and the producer/consumer indices is enforced
 * with smp_rmb() pairs.  Copies as many contiguous entries as fit
 * without overwriting a destination slot that is still occupied.
 * Returns 0 on full transfer, -ENOSPC if a destination slot was
 * found occupied (partial transfer).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First pass: the standard-size buffer ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only up to the end of the circular ring; a
		 * wrapped region is handled on the next loop pass.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy if any destination slot still holds
		 * a buffer; report the ring as full via -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Mirror the DMA addresses into the destination BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Second pass: the jumbo buffer ring, same scheme as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6707
/* Core per-vector NAPI work: reap TX completions, then service the RX
 * return ring within the remaining budget.  Under RSS, vector 1
 * additionally gathers recycled buffers from all RX queues into the
 * hardware producer rings and posts the new producer indices.
 * Returns the accumulated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* Bail out early if TX error recovery was triggered. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an RX return ring have nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		/* Pull recycled buffers from every RX queue into the
		 * hardware-owned producer rings on napi[0].
		 */
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order BD updates before the mailbox writes below. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A partial transfer (-ENOSPC) means buffers remain
		 * queued; force another interrupt to retry soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
David S. Millerf7383c22005-05-18 22:50:53 -07006758
/* Queue the chip-reset workqueue item, at most once: the
 * RESET_TASK_PENDING bit guards against double-scheduling.
 */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
6764
/* Synchronously cancel any queued/running reset task, then clear the
 * pending flags so a future reset can be scheduled again.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6771
/* NAPI poll handler for MSI-X vectors 1..N (tagged status blocks).
 * Loops doing TX/RX work until the budget is exhausted or no work
 * remains, then completes NAPI and re-enables the vector's interrupt
 * by writing the last status tag to the interrupt mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				/* A refill request raced with completion:
				 * force an interrupt so it isn't lost.
				 */
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6831
Matt Carlsone64de4e2011-04-13 11:05:05 +00006832static void tg3_process_error(struct tg3 *tp)
6833{
6834 u32 val;
6835 bool real_error = false;
6836
Joe Perches63c3a662011-04-26 08:12:10 +00006837 if (tg3_flag(tp, ERROR_PROCESSED))
Matt Carlsone64de4e2011-04-13 11:05:05 +00006838 return;
6839
6840 /* Check Flow Attention register */
6841 val = tr32(HOSTCC_FLOW_ATTN);
6842 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6843 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6844 real_error = true;
6845 }
6846
6847 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6848 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6849 real_error = true;
6850 }
6851
6852 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6853 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6854 real_error = true;
6855 }
6856
6857 if (!real_error)
6858 return;
6859
6860 tg3_dump_state(tp);
6861
Joe Perches63c3a662011-04-26 08:12:10 +00006862 tg3_flag_set(tp, ERROR_PROCESSED);
Matt Carlsondb219972011-11-04 09:15:03 +00006863 tg3_reset_task_schedule(tp);
Matt Carlsone64de4e2011-04-13 11:05:05 +00006864}
6865
/* NAPI poll handler for vector 0 (and the only handler when the
 * device runs single-queue).  Besides TX/RX work it also processes
 * chip error indications and link-change events from the status
 * block, then re-enables interrupts via tg3_int_reenable() once all
 * work is drained.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6913
Matt Carlson66cfd1b2010-09-30 10:34:30 +00006914static void tg3_napi_disable(struct tg3 *tp)
6915{
6916 int i;
6917
6918 for (i = tp->irq_cnt - 1; i >= 0; i--)
6919 napi_disable(&tp->napi[i].napi);
6920}
6921
6922static void tg3_napi_enable(struct tg3 *tp)
6923{
6924 int i;
6925
6926 for (i = 0; i < tp->irq_cnt; i++)
6927 napi_enable(&tp->napi[i].napi);
6928}
6929
6930static void tg3_napi_init(struct tg3 *tp)
6931{
6932 int i;
6933
6934 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6935 for (i = 1; i < tp->irq_cnt; i++)
6936 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6937}
6938
6939static void tg3_napi_fini(struct tg3 *tp)
6940{
6941 int i;
6942
6943 for (i = 0; i < tp->irq_cnt; i++)
6944 netif_napi_del(&tp->napi[i].napi);
6945}
6946
/* Quiesce the data path: refresh trans_start so the TX watchdog does
 * not fire while we are down, stop NAPI polling, drop the carrier,
 * and disable the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
6954
/* Restart the data path after a quiesce: resume PTP, wake the TX
 * queues, restore carrier state, re-enable NAPI and interrupts.
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the poll loop runs once. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6973
/* Mark the IRQ handlers as synchronized and wait for any in-flight
 * handlers on every vector to finish.  The smp_mb() orders the
 * irq_sync store before the synchronize_irq() waits.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Nested quiesce is a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6986
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6998
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7003
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and, when this vector
	 * has an RX return ring, its next descriptor.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Hand the work off to NAPI unless an IRQ quiesce is active. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7021
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and, when this vector
	 * has an RX return ring, its next descriptor.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Hand the work off to NAPI unless an IRQ quiesce is active. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7047
/* INTx (legacy, possibly shared) interrupt handler. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;	/* assume the irq is ours */

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting):
			 * report it unhandled so a shared irq line
			 * can be serviced by its real owner.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Consume the status-updated flag before polling. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7096
/* INTx handler variant for chips using tagged status blocks: the
 * hardware stamps each status block with a tag, so "new work" is
 * detected by comparing tags instead of the SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;	/* assume the irq is ours */

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Unchanged tag and INTA not asserted: not ours. */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7148
/* ISR for interrupt test: only confirms that an interrupt from this
 * device actually fired (status updated or INTA asserted), disables
 * further interrupts, and reports handled/unhandled accordingly.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7163
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler for every vector so the
 * device can be serviced with interrupts disabled (netconsole, kgdboe).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Don't poll while irqs are being synchronized (e.g. reset). */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7177
/* net_device_ops .ndo_tx_timeout: log diagnostic state (if enabled)
 * and schedule the reset task to recover the hardware.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	/* Recovery runs from process context via the reset work item. */
	tg3_reset_task_schedule(tp);
}
7189
Michael Chanc58ec932005-09-17 00:46:27 -07007190/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7191static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7192{
7193 u32 base = (u32) mapping & 0xffffffff;
7194
Eric Dumazet807540b2010-09-23 05:40:09 +00007195 return (base > 0xffffdcc0) && (base + len + 8 < base);
Michael Chanc58ec932005-09-17 00:46:27 -07007196}
7197
/* Test for DMA addresses > 40-bit.  Only relevant on 64-bit builds
 * with HIGHMEM, and only for chips with the 40-bit DMA bug; all other
 * configurations report no overflow.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7210
Matt Carlsond1a3b732011-07-27 14:20:51 +00007211static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
Matt Carlson92cd3a12011-07-27 14:20:47 +00007212 dma_addr_t mapping, u32 len, u32 flags,
7213 u32 mss, u32 vlan)
Matt Carlson2ffcc982011-05-19 12:12:44 +00007214{
Matt Carlson92cd3a12011-07-27 14:20:47 +00007215 txbd->addr_hi = ((u64) mapping >> 32);
7216 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7217 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7218 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
Matt Carlson2ffcc982011-05-19 12:12:44 +00007219}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220
/* Queue one DMA segment into the TX ring, splitting it into multiple
 * BDs when the chip has a per-BD DMA length limit (tp->dma_limit).
 * *entry and *budget are advanced/consumed as BDs are written.
 * Returns true when a hardware DMA bug would be hit (caller must then
 * fall back to tigon3_dma_hwbug_workaround()).
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Known chip DMA erratas: short (<= 8 byte) buffers, buffers
	 * crossing a 4GB boundary, and addresses above 40 bits.
	 */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;	/* last BD written, for rollback */
		/* All but the final split BD must not carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark this BD as a split piece so unmap can
			 * skip it (it shares the segment's mapping).
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final piece keeps the original flags,
				 * including TXD_FLAG_END if present.
				 */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of budget mid-split: undo the
				 * fragmented mark on the last BD written.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No per-BD limit: the segment fits in a single BD. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7280
/* Unmap an skb's DMA mappings starting at ring index @entry: the
 * head mapping first, then fragments 0..@last (@last == -1 means no
 * fragments).  BDs marked "fragmented" are split pieces of the same
 * mapping and are skipped rather than unmapped.  Clears txb->skb.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;	/* ring slot no longer owns the skb */

	/* Linear (head) portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip split BDs belonging to the head mapping. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split BDs belonging to this fragment's mapping. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7318
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a freshly allocated linear skb (with extra
 * headroom alignment on 5701), maps it, and queues it via
 * tg3_tx_frag_set().  On success *pskb points at the new skb and the
 * original is freed; on failure (-1) both skbs are freed.  Note that
 * *pskb is updated unconditionally, so on failure it may hold NULL or
 * a freed pointer — callers must treat the skb as gone either way.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 wants the payload 4-byte aligned; pad headroom. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* Single linear buffer: it is the last BD too. */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hits a DMA bug: roll back. */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	/* Original skb is consumed in every case. */
	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7373
Matt Carlson2ffcc982011-05-19 12:12:44 +00007374static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
Michael Chan52c0fd82006-06-29 20:15:54 -07007375
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments the skb and transmits each resulting segment
 * through tg3_start_xmit().  The original skb is always consumed
 * (unless NETDEV_TX_BUSY is returned to request a requeue).
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Estimated worst-case BD usage: 3 BDs per resulting segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		/* Reclaim freed enough room after all; resume. */
		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;	/* detach before transmitting */
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
Michael Chan52c0fd82006-06-29 20:15:54 -07007416
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb (head + fragments) into TX BDs, applying checksum/TSO/
 * vlan/timestamp flags, works around chip DMA erratas by falling back
 * to a linearized copy, then kicks the hardware producer mailbox.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;		/* BDs we may still consume */
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS, TX vectors start at napi[1]; napi[0] is RX/events. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* --- TSO setup --- */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* Headers are modified below; make them private first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		/* IP + TCP header length (excluding the ethernet header). */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Chips with TSO_BUG can't handle headers > 80 bytes;
		 * segment in software instead.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* Hardware TSO computes the checksum itself. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode header-length info into mss/base_flags; the
		 * bit layout is generation-specific (see chip docs).
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Request a hardware TX timestamp when enabled and asked for. */
	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	/* Queue the linear head; mark it END when there are no frags. */
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Firmware TSO carries mss only on the first BD. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Undo everything queued so far, then retry with a
		 * linearized copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unmap head + fragments 0..i-1 (fragment i failed to map). */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7639
/* Enable or disable internal MAC loopback.  tp->lock must be held.
 *
 * When enabling, force full duplex and loop the MAC transmit path back
 * into its receiver (MAC_MODE_PORT_INT_LPBACK), picking MII or GMII
 * port mode based on the PHY's speed capability.  When disabling, clear
 * the loopback bit and, where applicable, the link-polarity override.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		/* Pre-5705 chips also need the link-polarity bit set. */
		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		/* 10/100-only PHYs use MII port mode, others GMII. */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	/* Give the MAC mode change time to settle. */
	udelay(40);
}
7667
/* Put the PHY into loopback at the requested speed.
 *
 * @speed:   SPEED_10 / SPEED_100 / SPEED_1000; anything else selects the
 *           fastest speed the PHY supports (100 for FET PHYs, else 1000).
 * @extlpbk: true for external loopback (requires a loopback plug and a
 *           PHY-specific setup via tg3_phy_set_extloopbk()), false for
 *           internal PHY loopback via BMCR_LOOPBACK.
 *
 * Returns 0 on success or -EIO if external loopback setup fails.
 * tp->lock is held by the caller (ethtool self-test path).
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* Loopback tests need APD and auto-MDIX out of the way. */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs top out at 100 Mbps. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role so the link comes up without
			 * a partner negotiating the other role.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* Force tx link/lock indications on 5785 FET PHYs,
		 * preserving any trim bits set above for extlpbk.
		 */
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the (possibly adjusted)
	 * loopback speed: GMII for gigabit, MII otherwise.
	 */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401/5411 on the 5700 need opposite polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7760
/* Toggle internal MAC loopback according to the NETIF_F_LOOPBACK feature
 * bit in @features.  Early-returns if the MAC is already in the requested
 * state.  Takes tp->lock around the MAC mode change.
 */
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		/* In loopback the link is up by construction. */
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
7786
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007787static netdev_features_t tg3_fix_features(struct net_device *dev,
7788 netdev_features_t features)
Michał Mirosławdc668912011-04-07 03:35:07 +00007789{
7790 struct tg3 *tp = netdev_priv(dev);
7791
Joe Perches63c3a662011-04-26 08:12:10 +00007792 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
Michał Mirosławdc668912011-04-07 03:35:07 +00007793 features &= ~NETIF_F_ALL_TSO;
7794
7795 return features;
7796}
7797
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007798static int tg3_set_features(struct net_device *dev, netdev_features_t features)
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007799{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00007800 netdev_features_t changed = dev->features ^ features;
Mahesh Bandewar06c03c02011-05-08 06:51:48 +00007801
7802 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7803 tg3_set_loopback(dev, features);
7804
7805 return 0;
7806}
7807
/* Release all rx data buffers held by a producer ring set.
 *
 * For a per-vector (non-default) ring set only the occupied span between
 * the consumer and producer indices is walked, wrapping with the ring
 * mask.  The default ring set (napi[0].prodring) is swept in full.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Non-default set: free only cons..prod, modulo ring size. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Default ring set: free every slot unconditionally. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
7841
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even a single rx buffer
 * could be allocated; in that case all buffers are freed again.
 * If only some buffers fit, the pending counts are shrunk and 0
 * is still returned.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Non-default ring sets only need their bookkeeping arrays
	 * cleared; the hardware descriptors belong to napi[0].
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* On 5780-class chips jumbo frames share the standard ring,
	 * so size the rx buffers for jumbo DMA in that case.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Partial success: run with a smaller ring. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup for the dedicated jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7950
/* Free the host bookkeeping arrays and DMA descriptor rings of one rx
 * producer ring set.  Safe on a partially initialized set (kfree(NULL)
 * is a no-op, and the DMA frees are guarded); pointers are NULLed so a
 * repeated call is harmless.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
7969
/* Allocate the host bookkeeping arrays and DMA descriptor rings for one
 * rx producer ring set.  Jumbo resources are allocated only on chips
 * with a dedicated jumbo ring (jumbo-capable but not 5780-class).
 * Returns 0 or -ENOMEM; on failure everything allocated so far is
 * released via tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
8005
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors with no tx ring have no tx_buffers array. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the skb head plus all of its fragments. */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		/* Reset byte-queue-limit accounting for this tx queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
8039
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if repopulating any rx producer
 * ring fails (all rings are freed again in that case).
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Reset interrupt tags and clear the status block. */
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		/* Reset tx ring state; the ring itself may be absent
		 * on vectors that do not handle tx.
		 */
		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
8080
Michael Chan49a359e2012-09-28 07:12:37 +00008081static void tg3_mem_tx_release(struct tg3 *tp)
8082{
8083 int i;
8084
8085 for (i = 0; i < tp->irq_max; i++) {
8086 struct tg3_napi *tnapi = &tp->napi[i];
8087
8088 if (tnapi->tx_ring) {
8089 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8090 tnapi->tx_ring, tnapi->tx_desc_mapping);
8091 tnapi->tx_ring = NULL;
8092 }
8093
8094 kfree(tnapi->tx_buffers);
8095 tnapi->tx_buffers = NULL;
8096 }
8097}
8098
8099static int tg3_mem_tx_acquire(struct tg3 *tp)
8100{
8101 int i;
8102 struct tg3_napi *tnapi = &tp->napi[0];
8103
8104 /* If multivector TSS is enabled, vector 0 does not handle
8105 * tx interrupts. Don't allocate any resources for it.
8106 */
8107 if (tg3_flag(tp, ENABLE_TSS))
8108 tnapi++;
8109
8110 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8111 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8112 TG3_TX_RING_SIZE, GFP_KERNEL);
8113 if (!tnapi->tx_buffers)
8114 goto err_out;
8115
8116 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8117 TG3_TX_RING_BYTES,
8118 &tnapi->tx_desc_mapping,
8119 GFP_KERNEL);
8120 if (!tnapi->tx_ring)
8121 goto err_out;
8122 }
8123
8124 return 0;
8125
8126err_out:
8127 tg3_mem_tx_release(tp);
8128 return -ENOMEM;
8129}
8130
8131static void tg3_mem_rx_release(struct tg3 *tp)
8132{
8133 int i;
8134
8135 for (i = 0; i < tp->irq_max; i++) {
8136 struct tg3_napi *tnapi = &tp->napi[i];
8137
8138 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8139
8140 if (!tnapi->rx_rcb)
8141 continue;
8142
8143 dma_free_coherent(&tp->pdev->dev,
8144 TG3_RX_RCB_RING_BYTES(tp),
8145 tnapi->rx_rcb,
8146 tnapi->rx_rcb_mapping);
8147 tnapi->rx_rcb = NULL;
8148 }
8149}
8150
8151static int tg3_mem_rx_acquire(struct tg3 *tp)
8152{
8153 unsigned int i, limit;
8154
8155 limit = tp->rxq_cnt;
8156
8157 /* If RSS is enabled, we need a (dummy) producer ring
8158 * set on vector zero. This is the true hw prodring.
8159 */
8160 if (tg3_flag(tp, ENABLE_RSS))
8161 limit++;
8162
8163 for (i = 0; i < limit; i++) {
8164 struct tg3_napi *tnapi = &tp->napi[i];
8165
8166 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8167 goto err_out;
8168
8169 /* If multivector RSS is enabled, vector 0
8170 * does not handle rx or tx interrupts.
8171 * Don't allocate any resources for it.
8172 */
8173 if (!i && tg3_flag(tp, ENABLE_RSS))
8174 continue;
8175
8176 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8177 TG3_RX_RCB_RING_BYTES(tp),
8178 &tnapi->rx_rcb_mapping,
8179 GFP_KERNEL);
8180 if (!tnapi->rx_rcb)
8181 goto err_out;
8182
8183 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8184 }
8185
8186 return 0;
8187
8188err_out:
8189 tg3_mem_rx_release(tp);
8190 return -ENOMEM;
8191}
8192
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	/* Free each per-vector status block first. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* Finally release the hardware statistics block. */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
8221
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hardware statistics block, one status block per
 * interrupt vector, and the tx/rx ring memory.  Returns 0 or -ENOMEM;
 * partial allocations are undone by tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* Vector 0 (and any i > 4) keeps prodptr == NULL:
			 * it handles no rx return ring under RSS.
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8291
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 on success.  On timeout, returns -ENODEV unless @silent is
 * set, in which case the timeout is ignored and 0 is returned.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and post the write immediately. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8339
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the receiver, shut down
 * every rx/tx DMA and queue block in dependency order, stop the
 * transmitter, and finally clear all status blocks.  Individual stop
 * failures are OR-ed into the return value (0 on full success).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new rx traffic arrives. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the rx-side blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the tx-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us for the transmitter to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe every per-vector status block. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8403
/* Save PCI command register before chip reset; restored afterwards by
 * tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8409
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	/* 5704 A0 in PCI-X mode needs the same-DMA retry workaround. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI/PCI-X also needs cacheline size and latency
	 * timer restored; PCIe manages these itself.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8470
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip, then bring the basic
 * MAC/PCI state back to a usable baseline.  Returns 0 on success or the
 * error from tg3_poll_fw() if the firmware never signals completion.
 * The sequence (quiesce -> reset write -> delays -> PCI/PCIe repair ->
 * re-init) is order-critical; do not reorder the register accesses.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	/* Make the flag/status clears visible before waiting out any
	 * handler already running on another CPU.
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* 5780-class parts preserve extra MEMARB_MODE bits across reset. */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Select MAC port mode from the PHY type (TBI for SerDes,
	 * GMII for MII-attached SerDes, otherwise leave it zero).
	 */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8717
Matt Carlson65ec6982012-02-28 23:33:37 +00008718static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8719static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
Matt Carlson92feeab2011-12-08 14:40:14 +00008720
/* tp->lock is held.
 *
 * Stop firmware, signal the reset 'kind' to the bootcode, abort the
 * hardware, reset the chip, and restore the MAC address.  Statistics
 * are snapshotted into *_prev so counters appear continuous to user
 * space across the reset.  Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	/* Tell bootcode a reset is coming (ordering matters vs. reset). */
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}
8752
/* ndo_set_mac_address handler.
 *
 * Validates and records the new address, then programs it into the
 * hardware under tp->lock.  When ASF firmware has claimed MAC address
 * slot 1 for its own use, that slot is left untouched.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* Hardware gets programmed on the next open if we are down. */
	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
8786
/* tp->lock is held.
 *
 * Program one ring control block (BDINFO) in NIC SRAM: the 64-bit host
 * DMA address of the ring, its length/flags word, and (on pre-5705
 * chips only) the NIC-local address of the descriptors.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	/* The NIC-address field only exists on pre-5705 devices. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
8807
/* Program transmit interrupt-coalescing parameters.
 *
 * Without TSS, the requested values go to the single default vector
 * registers.  With TSS, the default vector is zeroed and the values go
 * to the per-queue VEC1+ registers instead.  Note: 'i' deliberately
 * carries over from the per-queue loop into the trailing loop, which
 * zeroes the coalescing registers of every remaining unused vector.
 */
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		/* Each per-vector register bank is 0x18 bytes apart. */
		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero out the registers of all vectors beyond those in use. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8840
/* Program receive interrupt-coalescing parameters.
 *
 * Without RSS, the default vector registers take the requested values
 * and one fewer per-queue bank needs programming (limit--).  With RSS,
 * the default vector is zeroed and every rx queue gets the values via
 * the VEC1+ banks.  As in tg3_coal_tx_init(), 'i' carries over into
 * the trailing loop that zeroes the remaining unused vectors.
 */
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		/* Default vector already covers one queue. */
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	/* Per-vector register banks are 0x18 bytes apart. */
	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	/* Zero out the registers of all vectors beyond those in use. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8874
8875static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8876{
8877 tg3_coal_tx_init(tp, ec);
8878 tg3_coal_rx_init(tp, ec);
8879
Joe Perches63c3a662011-04-26 08:12:10 +00008880 if (!tg3_flag(tp, 5705_PLUS)) {
David S. Miller15f98502005-05-18 22:49:26 -07008881 u32 val = ec->stats_block_coalesce_usecs;
8882
Matt Carlsonb6080e12009-09-01 13:12:00 +00008883 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8884 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8885
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +00008886 if (!tp->link_up)
David S. Miller15f98502005-05-18 22:49:26 -07008887 val = 0;
8888
8889 tw32(HOSTCC_STAT_COAL_TICKS, val);
8890 }
8891}
Linus Torvalds1da177e2005-04-16 15:20:36 -07008892
/* tp->lock is held.
 *
 * Return every send / receive-return ring control block and mailbox to
 * its post-reset state: disable all rings but the first, zero producer
 * and consumer mailboxes, clear the host status blocks, and re-program
 * the BDINFO entries for the rings that are actually in use.  The
 * number of hardware RCB slots varies by chip family, hence the limit
 * ladders below.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the tx producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Program the remaining vectors' status blocks and rings. */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		/* Status-block address register pairs are 8 bytes apart. */
		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
9022
Matt Carlsoneb07a942011-04-20 07:57:36 +00009023static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9024{
9025 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9026
Joe Perches63c3a662011-04-26 08:12:10 +00009027 if (!tg3_flag(tp, 5750_PLUS) ||
9028 tg3_flag(tp, 5780_CLASS) ||
Joe Perches41535772013-02-16 11:20:04 +00009029 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9030 tg3_asic_rev(tp) == ASIC_REV_5752 ||
Matt Carlson513aa6e2011-11-21 15:01:18 +00009031 tg3_flag(tp, 57765_PLUS))
Matt Carlsoneb07a942011-04-20 07:57:36 +00009032 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
Joe Perches41535772013-02-16 11:20:04 +00009033 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9034 tg3_asic_rev(tp) == ASIC_REV_5787)
Matt Carlsoneb07a942011-04-20 07:57:36 +00009035 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9036 else
9037 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9038
9039 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9040 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9041
9042 val = min(nic_rep_thresh, host_rep_thresh);
9043 tw32(RCVBDI_STD_THRESH, val);
9044
Joe Perches63c3a662011-04-26 08:12:10 +00009045 if (tg3_flag(tp, 57765_PLUS))
Matt Carlsoneb07a942011-04-20 07:57:36 +00009046 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9047
Joe Perches63c3a662011-04-26 08:12:10 +00009048 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
Matt Carlsoneb07a942011-04-20 07:57:36 +00009049 return;
9050
Matt Carlson513aa6e2011-11-21 15:01:18 +00009051 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
Matt Carlsoneb07a942011-04-20 07:57:36 +00009052
9053 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9054
9055 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9056 tw32(RCVBDI_JUMBO_THRESH, val);
9057
Joe Perches63c3a662011-04-26 08:12:10 +00009058 if (tg3_flag(tp, 57765_PLUS))
Matt Carlsoneb07a942011-04-20 07:57:36 +00009059 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9060}
9061
/* Compute the standard reflected CRC-32 (polynomial 0xEDB88320) of
 * buf[0..len).  Used to hash multicast MAC addresses into the MAC
 * hash filter registers in __tg3_set_rx_mode().
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++) {
			u32 lsb = crc & 0x01;

			crc >>= 1;
			if (lsb)
				crc ^= 0xedb88320;
		}
	}

	return ~crc;
}
9085
9086static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9087{
9088 /* accept or reject all multicast frames */
9089 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9090 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9091 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9092 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9093}
9094
9095static void __tg3_set_rx_mode(struct net_device *dev)
9096{
9097 struct tg3 *tp = netdev_priv(dev);
9098 u32 rx_mode;
9099
9100 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9101 RX_MODE_KEEP_VLAN_TAG);
9102
9103#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9104 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9105 * flag clear.
9106 */
9107 if (!tg3_flag(tp, ENABLE_ASF))
9108 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9109#endif
9110
9111 if (dev->flags & IFF_PROMISC) {
9112 /* Promiscuous mode. */
9113 rx_mode |= RX_MODE_PROMISC;
9114 } else if (dev->flags & IFF_ALLMULTI) {
9115 /* Accept all multicast. */
9116 tg3_set_multi(tp, 1);
9117 } else if (netdev_mc_empty(dev)) {
9118 /* Reject all multicast. */
9119 tg3_set_multi(tp, 0);
9120 } else {
9121 /* Accept one or more multicast(s). */
9122 struct netdev_hw_addr *ha;
9123 u32 mc_filter[4] = { 0, };
9124 u32 regidx;
9125 u32 bit;
9126 u32 crc;
9127
9128 netdev_for_each_mc_addr(ha, dev) {
9129 crc = calc_crc(ha->addr, ETH_ALEN);
9130 bit = ~crc & 0x7f;
9131 regidx = (bit & 0x60) >> 5;
9132 bit &= 0x1f;
9133 mc_filter[regidx] |= (1 << bit);
9134 }
9135
9136 tw32(MAC_HASH_REG_0, mc_filter[0]);
9137 tw32(MAC_HASH_REG_1, mc_filter[1]);
9138 tw32(MAC_HASH_REG_2, mc_filter[2]);
9139 tw32(MAC_HASH_REG_3, mc_filter[3]);
9140 }
9141
9142 if (rx_mode != tp->rx_mode) {
9143 tp->rx_mode = rx_mode;
9144 tw32_f(MAC_RX_MODE, rx_mode);
9145 udelay(10);
9146 }
9147}
9148
Michael Chan91024262012-09-28 07:12:38 +00009149static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
Matt Carlson90415472011-12-16 13:33:23 +00009150{
9151 int i;
9152
9153 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
Michael Chan91024262012-09-28 07:12:38 +00009154 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
Matt Carlson90415472011-12-16 13:33:23 +00009155}
9156
9157static void tg3_rss_check_indir_tbl(struct tg3 *tp)
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009158{
9159 int i;
9160
9161 if (!tg3_flag(tp, SUPPORT_MSIX))
9162 return;
9163
Michael Chan0b3ba052012-11-14 14:44:29 +00009164 if (tp->rxq_cnt == 1) {
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009165 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
Matt Carlson90415472011-12-16 13:33:23 +00009166 return;
9167 }
9168
9169 /* Validate table against current IRQ count */
9170 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
Michael Chan0b3ba052012-11-14 14:44:29 +00009171 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
Matt Carlson90415472011-12-16 13:33:23 +00009172 break;
9173 }
9174
9175 if (i != TG3_RSS_INDIR_TBL_SIZE)
Michael Chan91024262012-09-28 07:12:38 +00009176 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009177}
9178
Matt Carlson90415472011-12-16 13:33:23 +00009179static void tg3_rss_write_indir_tbl(struct tg3 *tp)
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009180{
9181 int i = 0;
9182 u32 reg = MAC_RSS_INDIR_TBL_0;
9183
9184 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9185 u32 val = tp->rss_ind_tbl[i];
9186 i++;
9187 for (; i % 8; i++) {
9188 val <<= 4;
9189 val |= tp->rss_ind_tbl[i];
9190 }
9191 tw32(reg, val);
9192 reg += 4;
9193 }
9194}
9195
Matt Carlson2d31eca2009-09-01 12:53:31 +00009196/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07009197static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009198{
9199 u32 val, rdmac_mode;
9200 int i, err, limit;
Matt Carlson8fea32b2010-09-15 08:59:58 +00009201 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009202
9203 tg3_disable_ints(tp);
9204
9205 tg3_stop_fw(tp);
9206
9207 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9208
Joe Perches63c3a662011-04-26 08:12:10 +00009209 if (tg3_flag(tp, INIT_COMPLETE))
Michael Chane6de8ad2005-05-05 14:42:41 -07009210 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009211
Matt Carlson699c0192010-12-06 08:28:51 +00009212 /* Enable MAC control of LPI */
9213 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
Michael Chanc65a17f2013-01-06 12:51:07 +00009214 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9215 TG3_CPMU_EEE_LNKIDL_UART_IDL;
Joe Perches41535772013-02-16 11:20:04 +00009216 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
Michael Chanc65a17f2013-01-06 12:51:07 +00009217 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9218
9219 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
Matt Carlson699c0192010-12-06 08:28:51 +00009220
9221 tw32_f(TG3_CPMU_EEE_CTRL,
9222 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9223
Matt Carlsona386b902010-12-06 08:28:53 +00009224 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9225 TG3_CPMU_EEEMD_LPI_IN_TX |
9226 TG3_CPMU_EEEMD_LPI_IN_RX |
9227 TG3_CPMU_EEEMD_EEE_ENABLE;
9228
Joe Perches41535772013-02-16 11:20:04 +00009229 if (tg3_asic_rev(tp) != ASIC_REV_5717)
Matt Carlsona386b902010-12-06 08:28:53 +00009230 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9231
Joe Perches63c3a662011-04-26 08:12:10 +00009232 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsona386b902010-12-06 08:28:53 +00009233 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9234
9235 tw32_f(TG3_CPMU_EEE_MODE, val);
9236
9237 tw32_f(TG3_CPMU_EEE_DBTMR1,
9238 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9239 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9240
9241 tw32_f(TG3_CPMU_EEE_DBTMR2,
Matt Carlsond7f2ab22011-01-25 15:58:56 +00009242 TG3_CPMU_DBTMR2_APE_TX_2047US |
Matt Carlsona386b902010-12-06 08:28:53 +00009243 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
Matt Carlson699c0192010-12-06 08:28:51 +00009244 }
9245
Matt Carlson603f1172010-02-12 14:47:10 +00009246 if (reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08009247 tg3_phy_reset(tp);
9248
Linus Torvalds1da177e2005-04-16 15:20:36 -07009249 err = tg3_chip_reset(tp);
9250 if (err)
9251 return err;
9252
9253 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9254
Joe Perches41535772013-02-16 11:20:04 +00009255 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07009256 val = tr32(TG3_CPMU_CTRL);
9257 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9258 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08009259
9260 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9261 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9262 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9263 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9264
9265 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9266 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9267 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9268 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9269
9270 val = tr32(TG3_CPMU_HST_ACC);
9271 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9272 val |= CPMU_HST_ACC_MACCLK_6_25;
9273 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07009274 }
9275
Joe Perches41535772013-02-16 11:20:04 +00009276 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
Matt Carlson33466d92009-04-20 06:57:41 +00009277 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9278 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9279 PCIE_PWR_MGMT_L1_THRESH_4MS;
9280 tw32(PCIE_PWR_MGMT_THRESH, val);
Matt Carlson521e6b92009-08-25 10:06:01 +00009281
9282 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9283 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9284
9285 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
Matt Carlson33466d92009-04-20 06:57:41 +00009286
Matt Carlsonf40386c2009-11-02 14:24:02 +00009287 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9288 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
Matt Carlson255ca312009-08-25 10:07:27 +00009289 }
9290
Joe Perches63c3a662011-04-26 08:12:10 +00009291 if (tg3_flag(tp, L1PLLPD_EN)) {
Matt Carlson614b0592010-01-20 16:58:02 +00009292 u32 grc_mode = tr32(GRC_MODE);
9293
9294 /* Access the lower 1K of PL PCIE block registers. */
9295 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9296 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9297
9298 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9299 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9300 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9301
9302 tw32(GRC_MODE, grc_mode);
9303 }
9304
Matt Carlson55086ad2011-12-14 11:09:59 +00009305 if (tg3_flag(tp, 57765_CLASS)) {
Joe Perches41535772013-02-16 11:20:04 +00009306 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
Matt Carlson5093eed2010-11-24 08:31:45 +00009307 u32 grc_mode = tr32(GRC_MODE);
Matt Carlsoncea46462010-04-12 06:58:24 +00009308
Matt Carlson5093eed2010-11-24 08:31:45 +00009309 /* Access the lower 1K of PL PCIE block registers. */
9310 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9311 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
Matt Carlsoncea46462010-04-12 06:58:24 +00009312
Matt Carlson5093eed2010-11-24 08:31:45 +00009313 val = tr32(TG3_PCIE_TLDLPL_PORT +
9314 TG3_PCIE_PL_LO_PHYCTL5);
9315 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9316 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
Matt Carlsoncea46462010-04-12 06:58:24 +00009317
Matt Carlson5093eed2010-11-24 08:31:45 +00009318 tw32(GRC_MODE, grc_mode);
9319 }
Matt Carlsona977dbe2010-04-12 06:58:26 +00009320
Joe Perches41535772013-02-16 11:20:04 +00009321 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
Matt Carlsond3f677a2013-02-14 14:27:51 +00009322 u32 grc_mode;
9323
9324 /* Fix transmit hangs */
9325 val = tr32(TG3_CPMU_PADRNG_CTL);
9326 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9327 tw32(TG3_CPMU_PADRNG_CTL, val);
9328
9329 grc_mode = tr32(GRC_MODE);
Matt Carlson1ff30a52011-05-19 12:12:46 +00009330
9331 /* Access the lower 1K of DL PCIE block registers. */
9332 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9333 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9334
9335 val = tr32(TG3_PCIE_TLDLPL_PORT +
9336 TG3_PCIE_DL_LO_FTSMAX);
9337 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9338 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9339 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9340
9341 tw32(GRC_MODE, grc_mode);
9342 }
9343
Matt Carlsona977dbe2010-04-12 06:58:26 +00009344 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9345 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9346 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9347 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
Matt Carlsoncea46462010-04-12 06:58:24 +00009348 }
9349
Linus Torvalds1da177e2005-04-16 15:20:36 -07009350 /* This works around an issue with Athlon chipsets on
9351 * B3 tigon3 silicon. This bit has no effect on any
9352 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07009353 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009354 */
Joe Perches63c3a662011-04-26 08:12:10 +00009355 if (!tg3_flag(tp, CPMU_PRESENT)) {
9356 if (!tg3_flag(tp, PCI_EXPRESS))
Matt Carlson795d01c2007-10-07 23:28:17 -07009357 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9358 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009360
Joe Perches41535772013-02-16 11:20:04 +00009361 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
Joe Perches63c3a662011-04-26 08:12:10 +00009362 tg3_flag(tp, PCIX_MODE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009363 val = tr32(TG3PCI_PCISTATE);
9364 val |= PCISTATE_RETRY_SAME_DMA;
9365 tw32(TG3PCI_PCISTATE, val);
9366 }
9367
Joe Perches63c3a662011-04-26 08:12:10 +00009368 if (tg3_flag(tp, ENABLE_APE)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -07009369 /* Allow reads and writes to the
9370 * APE register and memory space.
9371 */
9372 val = tr32(TG3PCI_PCISTATE);
9373 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
Matt Carlsonf92d9dc12010-06-05 17:24:30 +00009374 PCISTATE_ALLOW_APE_SHMEM_WR |
9375 PCISTATE_ALLOW_APE_PSPACE_WR;
Matt Carlson0d3031d2007-10-10 18:02:43 -07009376 tw32(TG3PCI_PCISTATE, val);
9377 }
9378
Joe Perches41535772013-02-16 11:20:04 +00009379 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009380 /* Enable some hw fixes. */
9381 val = tr32(TG3PCI_MSI_DATA);
9382 val |= (1 << 26) | (1 << 28) | (1 << 29);
9383 tw32(TG3PCI_MSI_DATA, val);
9384 }
9385
9386 /* Descriptor ring init may make accesses to the
9387 * NIC SRAM area to setup the TX descriptors, so we
9388 * can only do this after the hardware has been
9389 * successfully reset.
9390 */
Michael Chan32d8c572006-07-25 16:38:29 -07009391 err = tg3_init_rings(tp);
9392 if (err)
9393 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009394
Joe Perches63c3a662011-04-26 08:12:10 +00009395 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsoncbf9ca62009-11-13 13:03:40 +00009396 val = tr32(TG3PCI_DMA_RW_CTRL) &
9397 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
Joe Perches41535772013-02-16 11:20:04 +00009398 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
Matt Carlson1a319022010-04-12 06:58:25 +00009399 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
Matt Carlson55086ad2011-12-14 11:09:59 +00009400 if (!tg3_flag(tp, 57765_CLASS) &&
Joe Perches41535772013-02-16 11:20:04 +00009401 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9402 tg3_asic_rev(tp) != ASIC_REV_5762)
Matt Carlson0aebff42011-04-25 12:42:45 +00009403 val |= DMA_RWCTRL_TAGGED_STAT_WA;
Matt Carlsoncbf9ca62009-11-13 13:03:40 +00009404 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
Joe Perches41535772013-02-16 11:20:04 +00009405 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9406 tg3_asic_rev(tp) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07009407 /* This value is determined during the probe time DMA
9408 * engine test, tg3_test_dma.
9409 */
9410 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009412
9413 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9414 GRC_MODE_4X_NIC_SEND_RINGS |
9415 GRC_MODE_NO_TX_PHDR_CSUM |
9416 GRC_MODE_NO_RX_PHDR_CSUM);
9417 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07009418
9419 /* Pseudo-header checksum is done by hardware logic and not
9420 * the offload processers, so make the chip do the pseudo-
9421 * header checksums on receive. For transmit it is more
9422 * convenient to do the pseudo-header checksum in software
9423 * as Linux does that on transmit for us in all cases.
9424 */
9425 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009426
Matt Carlsonfb4ce8a2012-12-03 19:37:00 +00009427 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9428 if (tp->rxptpctl)
9429 tw32(TG3_RX_PTP_CTL,
9430 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9431
9432 if (tg3_flag(tp, PTP_CAPABLE))
9433 val |= GRC_MODE_TIME_SYNC_ENABLE;
9434
9435 tw32(GRC_MODE, tp->grc_mode | val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009436
9437 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9438 val = tr32(GRC_MISC_CFG);
9439 val &= ~0xff;
9440 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9441 tw32(GRC_MISC_CFG, val);
9442
9443 /* Initialize MBUF/DESC pool. */
Joe Perches63c3a662011-04-26 08:12:10 +00009444 if (tg3_flag(tp, 5750_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009445 /* Do nothing. */
Joe Perches41535772013-02-16 11:20:04 +00009446 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009447 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
Joe Perches41535772013-02-16 11:20:04 +00009448 if (tg3_asic_rev(tp) == ASIC_REV_5704)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009449 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9450 else
9451 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9452 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9453 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
Joe Perches63c3a662011-04-26 08:12:10 +00009454 } else if (tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009455 int fw_len;
9456
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -08009457 fw_len = tp->fw_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009458 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9459 tw32(BUFMGR_MB_POOL_ADDR,
9460 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9461 tw32(BUFMGR_MB_POOL_SIZE,
9462 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9463 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009464
Michael Chan0f893dc2005-07-25 12:30:38 -07009465 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009466 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9467 tp->bufmgr_config.mbuf_read_dma_low_water);
9468 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9469 tp->bufmgr_config.mbuf_mac_rx_low_water);
9470 tw32(BUFMGR_MB_HIGH_WATER,
9471 tp->bufmgr_config.mbuf_high_water);
9472 } else {
9473 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9474 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9475 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9476 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9477 tw32(BUFMGR_MB_HIGH_WATER,
9478 tp->bufmgr_config.mbuf_high_water_jumbo);
9479 }
9480 tw32(BUFMGR_DMA_LOW_WATER,
9481 tp->bufmgr_config.dma_low_water);
9482 tw32(BUFMGR_DMA_HIGH_WATER,
9483 tp->bufmgr_config.dma_high_water);
9484
Matt Carlsond309a462010-09-30 10:34:31 +00009485 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
Joe Perches41535772013-02-16 11:20:04 +00009486 if (tg3_asic_rev(tp) == ASIC_REV_5719)
Matt Carlsond309a462010-09-30 10:34:31 +00009487 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
Joe Perches41535772013-02-16 11:20:04 +00009488 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9489 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9490 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
Matt Carlson4d958472011-04-20 07:57:35 +00009491 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
Matt Carlsond309a462010-09-30 10:34:31 +00009492 tw32(BUFMGR_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009493 for (i = 0; i < 2000; i++) {
9494 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9495 break;
9496 udelay(10);
9497 }
9498 if (i >= 2000) {
Joe Perches05dbe002010-02-17 19:44:19 +00009499 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009500 return -ENODEV;
9501 }
9502
Joe Perches41535772013-02-16 11:20:04 +00009503 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
Matt Carlsoneb07a942011-04-20 07:57:36 +00009504 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
Michael Chanb5d37722006-09-27 16:06:21 -07009505
Matt Carlsoneb07a942011-04-20 07:57:36 +00009506 tg3_setup_rxbd_thresholds(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009507
9508 /* Initialize TG3_BDINFO's at:
9509 * RCVDBDI_STD_BD: standard eth size rx ring
9510 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9511 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9512 *
9513 * like so:
9514 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9515 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9516 * ring attribute flags
9517 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9518 *
9519 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9520 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9521 *
9522 * The size of each ring is fixed in the firmware, but the location is
9523 * configurable.
9524 */
9525 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
Matt Carlson21f581a2009-08-28 14:00:25 +00009526 ((u64) tpr->rx_std_mapping >> 32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009527 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
Matt Carlson21f581a2009-08-28 14:00:25 +00009528 ((u64) tpr->rx_std_mapping & 0xffffffff));
Joe Perches63c3a662011-04-26 08:12:10 +00009529 if (!tg3_flag(tp, 5717_PLUS))
Matt Carlson87668d32009-11-13 13:03:34 +00009530 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9531 NIC_SRAM_RX_BUFFER_DESC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009532
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009533 /* Disable the mini ring */
Joe Perches63c3a662011-04-26 08:12:10 +00009534 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009535 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9536 BDINFO_FLAGS_DISABLED);
9537
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009538 /* Program the jumbo buffer descriptor ring control
9539 * blocks on those devices that have them.
9540 */
Joe Perches41535772013-02-16 11:20:04 +00009541 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
Joe Perches63c3a662011-04-26 08:12:10 +00009542 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009543
Joe Perches63c3a662011-04-26 08:12:10 +00009544 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009545 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
Matt Carlson21f581a2009-08-28 14:00:25 +00009546 ((u64) tpr->rx_jmb_mapping >> 32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009547 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
Matt Carlson21f581a2009-08-28 14:00:25 +00009548 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
Matt Carlsonde9f5232011-04-05 14:22:43 +00009549 val = TG3_RX_JMB_RING_SIZE(tp) <<
9550 BDINFO_FLAGS_MAXLEN_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009551 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
Matt Carlsonde9f5232011-04-05 14:22:43 +00009552 val | BDINFO_FLAGS_USE_EXT_RECV);
Joe Perches63c3a662011-04-26 08:12:10 +00009553 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
Michael Chanc65a17f2013-01-06 12:51:07 +00009554 tg3_flag(tp, 57765_CLASS) ||
Joe Perches41535772013-02-16 11:20:04 +00009555 tg3_asic_rev(tp) == ASIC_REV_5762)
Matt Carlson87668d32009-11-13 13:03:34 +00009556 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9557 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009558 } else {
9559 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9560 BDINFO_FLAGS_DISABLED);
9561 }
9562
Joe Perches63c3a662011-04-26 08:12:10 +00009563 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsonfa6b2aa2011-11-21 15:01:19 +00009564 val = TG3_RX_STD_RING_SIZE(tp);
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009565 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9566 val |= (TG3_RX_STD_DMA_SZ << 2);
9567 } else
Matt Carlson04380d42010-04-12 06:58:29 +00009568 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009569 } else
Matt Carlsonde9f5232011-04-05 14:22:43 +00009570 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
Matt Carlsonfdb72b32009-08-28 13:57:12 +00009571
9572 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009573
Matt Carlson411da642009-11-13 13:03:46 +00009574 tpr->rx_std_prod_idx = tp->rx_pending;
Matt Carlson66711e62009-11-13 13:03:49 +00009575 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009576
Joe Perches63c3a662011-04-26 08:12:10 +00009577 tpr->rx_jmb_prod_idx =
9578 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
Matt Carlson66711e62009-11-13 13:03:49 +00009579 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009580
Matt Carlson2d31eca2009-09-01 12:53:31 +00009581 tg3_rings_reset(tp);
9582
Linus Torvalds1da177e2005-04-16 15:20:36 -07009583 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07009584 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009585
9586 /* MTU + ethernet header + FCS + optional VLAN tag */
Matt Carlsonf7b493e2009-02-25 14:21:52 +00009587 tw32(MAC_RX_MTU_SIZE,
9588 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009589
9590 /* The slot time is changed by tg3_setup_phy if we
9591 * run at gigabit with half duplex.
9592 */
Matt Carlsonf2096f92011-04-05 14:22:48 +00009593 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9594 (6 << TX_LENGTHS_IPG_SHIFT) |
9595 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9596
Joe Perches41535772013-02-16 11:20:04 +00009597 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9598 tg3_asic_rev(tp) == ASIC_REV_5762)
Matt Carlsonf2096f92011-04-05 14:22:48 +00009599 val |= tr32(MAC_TX_LENGTHS) &
9600 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9601 TX_LENGTHS_CNT_DWN_VAL_MSK);
9602
9603 tw32(MAC_TX_LENGTHS, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009604
9605 /* Receive rules. */
9606 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9607 tw32(RCVLPC_CONFIG, 0x0181);
9608
9609 /* Calculate RDMAC_MODE setting early, we need it to determine
9610 * the RCVLPC_STATE_ENABLE mask.
9611 */
9612 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9613 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9614 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9615 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9616 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07009617
Joe Perches41535772013-02-16 11:20:04 +00009618 if (tg3_asic_rev(tp) == ASIC_REV_5717)
Matt Carlson0339e4e2010-02-12 14:47:09 +00009619 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9620
Joe Perches41535772013-02-16 11:20:04 +00009621 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9622 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9623 tg3_asic_rev(tp) == ASIC_REV_57780)
Matt Carlsond30cdd22007-10-07 23:28:35 -07009624 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9625 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9626 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9627
Joe Perches41535772013-02-16 11:20:04 +00009628 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9629 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +00009630 if (tg3_flag(tp, TSO_CAPABLE) &&
Joe Perches41535772013-02-16 11:20:04 +00009631 tg3_asic_rev(tp) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009632 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9633 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009634 !tg3_flag(tp, IS_5788)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009635 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9636 }
9637 }
9638
Joe Perches63c3a662011-04-26 08:12:10 +00009639 if (tg3_flag(tp, PCI_EXPRESS))
Michael Chan85e94ce2005-04-21 17:05:28 -07009640 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9641
Joe Perches41535772013-02-16 11:20:04 +00009642 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
Matt Carlsond3f677a2013-02-14 14:27:51 +00009643 tp->dma_limit = 0;
9644 if (tp->dev->mtu <= ETH_DATA_LEN) {
9645 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9646 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9647 }
9648 }
9649
Joe Perches63c3a662011-04-26 08:12:10 +00009650 if (tg3_flag(tp, HW_TSO_1) ||
9651 tg3_flag(tp, HW_TSO_2) ||
9652 tg3_flag(tp, HW_TSO_3))
Matt Carlson027455a2008-12-21 20:19:30 -08009653 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9654
Matt Carlson108a6c12011-05-19 12:12:47 +00009655 if (tg3_flag(tp, 57765_PLUS) ||
Joe Perches41535772013-02-16 11:20:04 +00009656 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9657 tg3_asic_rev(tp) == ASIC_REV_57780)
Matt Carlson027455a2008-12-21 20:19:30 -08009658 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009659
Joe Perches41535772013-02-16 11:20:04 +00009660 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9661 tg3_asic_rev(tp) == ASIC_REV_5762)
Matt Carlsonf2096f92011-04-05 14:22:48 +00009662 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9663
Joe Perches41535772013-02-16 11:20:04 +00009664 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9665 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9666 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9667 tg3_asic_rev(tp) == ASIC_REV_57780 ||
Joe Perches63c3a662011-04-26 08:12:10 +00009668 tg3_flag(tp, 57765_PLUS)) {
Michael Chanc65a17f2013-01-06 12:51:07 +00009669 u32 tgtreg;
9670
Joe Perches41535772013-02-16 11:20:04 +00009671 if (tg3_asic_rev(tp) == ASIC_REV_5762)
Michael Chanc65a17f2013-01-06 12:51:07 +00009672 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9673 else
9674 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9675
9676 val = tr32(tgtreg);
Joe Perches41535772013-02-16 11:20:04 +00009677 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9678 tg3_asic_rev(tp) == ASIC_REV_5762) {
Matt Carlsonb4495ed2011-01-25 15:58:47 +00009679 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9680 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9681 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9682 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9683 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9684 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
Matt Carlsonb75cc0e2010-11-24 08:31:46 +00009685 }
Michael Chanc65a17f2013-01-06 12:51:07 +00009686 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
Matt Carlson41a8a7e2010-09-15 08:59:53 +00009687 }
9688
Joe Perches41535772013-02-16 11:20:04 +00009689 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9690 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9691 tg3_asic_rev(tp) == ASIC_REV_5762) {
Michael Chanc65a17f2013-01-06 12:51:07 +00009692 u32 tgtreg;
9693
Joe Perches41535772013-02-16 11:20:04 +00009694 if (tg3_asic_rev(tp) == ASIC_REV_5762)
Michael Chanc65a17f2013-01-06 12:51:07 +00009695 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9696 else
9697 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9698
9699 val = tr32(tgtreg);
9700 tw32(tgtreg, val |
Matt Carlsond309a462010-09-30 10:34:31 +00009701 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9702 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9703 }
9704
Linus Torvalds1da177e2005-04-16 15:20:36 -07009705 /* Receive/send statistics. */
Joe Perches63c3a662011-04-26 08:12:10 +00009706 if (tg3_flag(tp, 5750_PLUS)) {
Michael Chan16613942006-06-29 20:15:13 -07009707 val = tr32(RCVLPC_STATS_ENABLE);
9708 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9709 tw32(RCVLPC_STATS_ENABLE, val);
9710 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009711 tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009712 val = tr32(RCVLPC_STATS_ENABLE);
9713 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9714 tw32(RCVLPC_STATS_ENABLE, val);
9715 } else {
9716 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9717 }
9718 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9719 tw32(SNDDATAI_STATSENAB, 0xffffff);
9720 tw32(SNDDATAI_STATSCTRL,
9721 (SNDDATAI_SCTRL_ENABLE |
9722 SNDDATAI_SCTRL_FASTUPD));
9723
9724 /* Setup host coalescing engine. */
9725 tw32(HOSTCC_MODE, 0);
9726 for (i = 0; i < 2000; i++) {
9727 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9728 break;
9729 udelay(10);
9730 }
9731
Michael Chand244c892005-07-05 14:42:33 -07009732 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009733
Joe Perches63c3a662011-04-26 08:12:10 +00009734 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009735 /* Status/statistics block address. See tg3_timer,
9736 * the tg3_periodic_fetch_stats call there, and
9737 * tg3_get_stats to see how this works for 5705/5750 chips.
9738 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009739 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9740 ((u64) tp->stats_mapping >> 32));
9741 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9742 ((u64) tp->stats_mapping & 0xffffffff));
9743 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
Matt Carlson2d31eca2009-09-01 12:53:31 +00009744
Linus Torvalds1da177e2005-04-16 15:20:36 -07009745 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
Matt Carlson2d31eca2009-09-01 12:53:31 +00009746
9747 /* Clear statistics and status block memory areas */
9748 for (i = NIC_SRAM_STATS_BLK;
9749 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9750 i += sizeof(u32)) {
9751 tg3_write_mem(tp, i, 0);
9752 udelay(40);
9753 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009754 }
9755
9756 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9757
9758 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9759 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009760 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009761 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9762
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009763 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9764 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
Michael Chanc94e3942005-09-27 12:12:42 -07009765 /* reset to prevent losing 1st rx packet intermittently */
9766 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9767 udelay(10);
9768 }
9769
Matt Carlson3bda1252008-08-15 14:08:22 -07009770 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Matt Carlson9e975cc2011-07-20 10:20:50 +00009771 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9772 MAC_MODE_FHDE_ENABLE;
9773 if (tg3_flag(tp, ENABLE_APE))
9774 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
Joe Perches63c3a662011-04-26 08:12:10 +00009775 if (!tg3_flag(tp, 5705_PLUS) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009776 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
Joe Perches41535772013-02-16 11:20:04 +00009777 tg3_asic_rev(tp) != ASIC_REV_5700)
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009778 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009779 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9780 udelay(40);
9781
Michael Chan314fba32005-04-21 17:07:04 -07009782 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Joe Perches63c3a662011-04-26 08:12:10 +00009783 * If TG3_FLAG_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07009784 * register to preserve the GPIO settings for LOMs. The GPIOs,
9785 * whether used as inputs or outputs, are set by boot code after
9786 * reset.
9787 */
Joe Perches63c3a662011-04-26 08:12:10 +00009788 if (!tg3_flag(tp, IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07009789 u32 gpio_mask;
9790
Michael Chan9d26e212006-12-07 00:21:14 -08009791 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9792 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9793 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07009794
Joe Perches41535772013-02-16 11:20:04 +00009795 if (tg3_asic_rev(tp) == ASIC_REV_5752)
Michael Chan3e7d83b2005-04-21 17:10:36 -07009796 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9797 GRC_LCLCTRL_GPIO_OUTPUT3;
9798
Joe Perches41535772013-02-16 11:20:04 +00009799 if (tg3_asic_rev(tp) == ASIC_REV_5755)
Michael Chanaf36e6b2006-03-23 01:28:06 -08009800 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9801
Gary Zambranoaaf84462007-05-05 11:51:45 -07009802 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07009803 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9804
9805 /* GPIO1 must be driven high for eeprom write protect */
Joe Perches63c3a662011-04-26 08:12:10 +00009806 if (tg3_flag(tp, EEPROM_WRITE_PROT))
Michael Chan9d26e212006-12-07 00:21:14 -08009807 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9808 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07009809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009810 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9811 udelay(100);
9812
Matt Carlsonc3b50032012-01-17 15:27:23 +00009813 if (tg3_flag(tp, USING_MSIX)) {
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009814 val = tr32(MSGINT_MODE);
Matt Carlsonc3b50032012-01-17 15:27:23 +00009815 val |= MSGINT_MODE_ENABLE;
9816 if (tp->irq_cnt > 1)
9817 val |= MSGINT_MODE_MULTIVEC_EN;
Matt Carlson5b39de92011-08-31 11:44:50 +00009818 if (!tg3_flag(tp, 1SHOT_MSI))
9819 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009820 tw32(MSGINT_MODE, val);
9821 }
9822
Joe Perches63c3a662011-04-26 08:12:10 +00009823 if (!tg3_flag(tp, 5705_PLUS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009824 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9825 udelay(40);
9826 }
9827
9828 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9829 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9830 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9831 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9832 WDMAC_MODE_LNGREAD_ENAB);
9833
Joe Perches41535772013-02-16 11:20:04 +00009834 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9835 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +00009836 if (tg3_flag(tp, TSO_CAPABLE) &&
Joe Perches41535772013-02-16 11:20:04 +00009837 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9838 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009839 /* nothing */
9840 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
Joe Perches63c3a662011-04-26 08:12:10 +00009841 !tg3_flag(tp, IS_5788)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009842 val |= WDMAC_MODE_RX_ACCEL;
9843 }
9844 }
9845
Michael Chand9ab5ad12006-03-20 22:27:35 -08009846 /* Enable host coalescing bug fix */
Joe Perches63c3a662011-04-26 08:12:10 +00009847 if (tg3_flag(tp, 5755_PLUS))
Matt Carlsonf51f3562008-05-25 23:45:08 -07009848 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad12006-03-20 22:27:35 -08009849
Joe Perches41535772013-02-16 11:20:04 +00009850 if (tg3_asic_rev(tp) == ASIC_REV_5785)
Matt Carlson788a0352009-11-02 14:26:03 +00009851 val |= WDMAC_MODE_BURST_ALL_DATA;
9852
Linus Torvalds1da177e2005-04-16 15:20:36 -07009853 tw32_f(WDMAC_MODE, val);
9854 udelay(40);
9855
Joe Perches63c3a662011-04-26 08:12:10 +00009856 if (tg3_flag(tp, PCIX_MODE)) {
Matt Carlson9974a352007-10-07 23:27:28 -07009857 u16 pcix_cmd;
9858
9859 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9860 &pcix_cmd);
Joe Perches41535772013-02-16 11:20:04 +00009861 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07009862 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9863 pcix_cmd |= PCI_X_CMD_READ_2K;
Joe Perches41535772013-02-16 11:20:04 +00009864 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07009865 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9866 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867 }
Matt Carlson9974a352007-10-07 23:27:28 -07009868 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9869 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009870 }
9871
9872 tw32_f(RDMAC_MODE, rdmac_mode);
9873 udelay(40);
9874
Joe Perches41535772013-02-16 11:20:04 +00009875 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
Michael Chan091f0ea2012-07-29 19:15:43 +00009876 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9877 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9878 break;
9879 }
9880 if (i < TG3_NUM_RDMA_CHANNELS) {
9881 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9882 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9883 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9884 tg3_flag_set(tp, 5719_RDMA_BUG);
9885 }
9886 }
9887
Linus Torvalds1da177e2005-04-16 15:20:36 -07009888 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009889 if (!tg3_flag(tp, 5705_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009890 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07009891
Joe Perches41535772013-02-16 11:20:04 +00009892 if (tg3_asic_rev(tp) == ASIC_REV_5761)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009893 tw32(SNDDATAC_MODE,
9894 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9895 else
9896 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9897
Linus Torvalds1da177e2005-04-16 15:20:36 -07009898 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9899 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009900 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
Joe Perches63c3a662011-04-26 08:12:10 +00009901 if (tg3_flag(tp, LRG_PROD_RING_CAP))
Matt Carlson7cb32cf2010-09-30 10:34:36 +00009902 val |= RCVDBDI_MODE_LRG_RING_SZ;
9903 tw32(RCVDBDI_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009904 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Joe Perches63c3a662011-04-26 08:12:10 +00009905 if (tg3_flag(tp, HW_TSO_1) ||
9906 tg3_flag(tp, HW_TSO_2) ||
9907 tg3_flag(tp, HW_TSO_3))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009908 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009909 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00009910 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009911 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9912 tw32(SNDBDI_MODE, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009913 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9914
Joe Perches41535772013-02-16 11:20:04 +00009915 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009916 err = tg3_load_5701_a0_firmware_fix(tp);
9917 if (err)
9918 return err;
9919 }
9920
Nithin Sujirc4dab502013-03-06 17:02:34 +00009921 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9922 /* Ignore any errors for the firmware download. If download
9923 * fails, the device will operate with EEE disabled
9924 */
9925 tg3_load_57766_firmware(tp);
9926 }
9927
Joe Perches63c3a662011-04-26 08:12:10 +00009928 if (tg3_flag(tp, TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009929 err = tg3_load_tso_firmware(tp);
9930 if (err)
9931 return err;
9932 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009933
9934 tp->tx_mode = TX_MODE_ENABLE;
Matt Carlsonf2096f92011-04-05 14:22:48 +00009935
Joe Perches63c3a662011-04-26 08:12:10 +00009936 if (tg3_flag(tp, 5755_PLUS) ||
Joe Perches41535772013-02-16 11:20:04 +00009937 tg3_asic_rev(tp) == ASIC_REV_5906)
Matt Carlsonb1d05212010-06-05 17:24:31 +00009938 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
Matt Carlsonf2096f92011-04-05 14:22:48 +00009939
Joe Perches41535772013-02-16 11:20:04 +00009940 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9941 tg3_asic_rev(tp) == ASIC_REV_5762) {
Matt Carlsonf2096f92011-04-05 14:22:48 +00009942 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9943 tp->tx_mode &= ~val;
9944 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9945 }
9946
Linus Torvalds1da177e2005-04-16 15:20:36 -07009947 tw32_f(MAC_TX_MODE, tp->tx_mode);
9948 udelay(100);
9949
Joe Perches63c3a662011-04-26 08:12:10 +00009950 if (tg3_flag(tp, ENABLE_RSS)) {
Matt Carlsonbcebcc42011-12-14 11:10:01 +00009951 tg3_rss_write_indir_tbl(tp);
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009952
9953 /* Setup the "secret" hash key. */
9954 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9955 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9956 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9957 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9958 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9959 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9960 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9961 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9962 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9963 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9964 }
9965
Linus Torvalds1da177e2005-04-16 15:20:36 -07009966 tp->rx_mode = RX_MODE_ENABLE;
Joe Perches63c3a662011-04-26 08:12:10 +00009967 if (tg3_flag(tp, 5755_PLUS))
Michael Chanaf36e6b2006-03-23 01:28:06 -08009968 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9969
Joe Perches63c3a662011-04-26 08:12:10 +00009970 if (tg3_flag(tp, ENABLE_RSS))
Matt Carlsonbaf8a942009-09-01 13:13:00 +00009971 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9972 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9973 RX_MODE_RSS_IPV6_HASH_EN |
9974 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9975 RX_MODE_RSS_IPV4_HASH_EN |
9976 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9977
Linus Torvalds1da177e2005-04-16 15:20:36 -07009978 tw32_f(MAC_RX_MODE, tp->rx_mode);
9979 udelay(10);
9980
Linus Torvalds1da177e2005-04-16 15:20:36 -07009981 tw32(MAC_LED_CTRL, tp->led_ctrl);
9982
9983 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009984 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009985 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9986 udelay(10);
9987 }
9988 tw32_f(MAC_RX_MODE, tp->rx_mode);
9989 udelay(10);
9990
Matt Carlsonf07e9af2010-08-02 11:26:07 +00009991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
Joe Perches41535772013-02-16 11:20:04 +00009992 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9993 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009994 /* Set drive transmission level to 1.2V */
9995 /* only if the signal pre-emphasis bit is not set */
9996 val = tr32(MAC_SERDES_CFG);
9997 val &= 0xfffff000;
9998 val |= 0x880;
9999 tw32(MAC_SERDES_CFG, val);
10000 }
Joe Perches41535772013-02-16 11:20:04 +000010001 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010002 tw32(MAC_SERDES_CFG, 0x616000);
10003 }
10004
10005 /* Prevent chip from dropping frames when flow control
10006 * is enabled.
10007 */
Matt Carlson55086ad2011-12-14 11:09:59 +000010008 if (tg3_flag(tp, 57765_CLASS))
Matt Carlson666bc832010-01-20 16:58:03 +000010009 val = 1;
10010 else
10011 val = 2;
10012 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010013
Joe Perches41535772013-02-16 11:20:04 +000010014 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000010015 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010016 /* Use hardware link auto-negotiation */
Joe Perches63c3a662011-04-26 08:12:10 +000010017 tg3_flag_set(tp, HW_AUTONEG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010018 }
10019
Matt Carlsonf07e9af2010-08-02 11:26:07 +000010020 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
Joe Perches41535772013-02-16 11:20:04 +000010021 tg3_asic_rev(tp) == ASIC_REV_5714) {
Michael Chand4d2c552006-03-20 17:47:20 -080010022 u32 tmp;
10023
10024 tmp = tr32(SERDES_RX_CTRL);
10025 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10026 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10027 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10028 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10029 }
10030
Joe Perches63c3a662011-04-26 08:12:10 +000010031 if (!tg3_flag(tp, USE_PHYLIB)) {
Matt Carlsonc6700ce2012-02-13 15:20:15 +000010032 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
Matt Carlson80096062010-08-02 11:26:06 +000010033 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010034
Matt Carlsondd477002008-05-25 23:45:58 -070010035 err = tg3_setup_phy(tp, 0);
10036 if (err)
10037 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010038
Matt Carlsonf07e9af2010-08-02 11:26:07 +000010039 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10040 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
Matt Carlsondd477002008-05-25 23:45:58 -070010041 u32 tmp;
10042
10043 /* Clear CRC stats. */
10044 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10045 tg3_writephy(tp, MII_TG3_TEST1,
10046 tmp | MII_TG3_TEST1_CRC_EN);
Matt Carlsonf08aa1a2010-08-02 11:26:05 +000010047 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
Matt Carlsondd477002008-05-25 23:45:58 -070010048 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010049 }
10050 }
10051
10052 __tg3_set_rx_mode(tp->dev);
10053
10054 /* Initialize receive rules. */
10055 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10056 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10057 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10058 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10059
Joe Perches63c3a662011-04-26 08:12:10 +000010060 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010061 limit = 8;
10062 else
10063 limit = 16;
Joe Perches63c3a662011-04-26 08:12:10 +000010064 if (tg3_flag(tp, ENABLE_ASF))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010065 limit -= 4;
10066 switch (limit) {
10067 case 16:
10068 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10069 case 15:
10070 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10071 case 14:
10072 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10073 case 13:
10074 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10075 case 12:
10076 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10077 case 11:
10078 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10079 case 10:
10080 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10081 case 9:
10082 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10083 case 8:
10084 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10085 case 7:
10086 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10087 case 6:
10088 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10089 case 5:
10090 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10091 case 4:
10092 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10093 case 3:
10094 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10095 case 2:
10096 case 1:
10097
10098 default:
10099 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070010100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010101
Joe Perches63c3a662011-04-26 08:12:10 +000010102 if (tg3_flag(tp, ENABLE_APE))
Matt Carlson9ce768e2007-10-11 19:49:11 -070010103 /* Write our heartbeat update interval to APE. */
10104 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10105 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -070010106
Linus Torvalds1da177e2005-04-16 15:20:36 -070010107 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10108
Linus Torvalds1da177e2005-04-16 15:20:36 -070010109 return 0;
10110}
10111
10112/* Called at device open time to get the chip ready for
10113 * packet processing. Invoked with tp->lock held.
10114 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -070010115static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010116{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010117 tg3_switch_clocks(tp);
10118
10119 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10120
Matt Carlson2f751b62008-08-04 23:17:34 -070010121 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010122}
10123
Michael Chanaed93e02012-07-16 16:24:02 +000010124static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10125{
10126 int i;
10127
10128 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10129 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10130
10131 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10132 off += len;
10133
10134 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10135 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10136 memset(ocir, 0, TG3_OCIR_LEN);
10137 }
10138}
10139
10140/* sysfs attributes for hwmon */
10141static ssize_t tg3_show_temp(struct device *dev,
10142 struct device_attribute *devattr, char *buf)
10143{
10144 struct pci_dev *pdev = to_pci_dev(dev);
10145 struct net_device *netdev = pci_get_drvdata(pdev);
10146 struct tg3 *tp = netdev_priv(netdev);
10147 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10148 u32 temperature;
10149
10150 spin_lock_bh(&tp->lock);
10151 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10152 sizeof(temperature));
10153 spin_unlock_bh(&tp->lock);
10154 return sprintf(buf, "%u\n", temperature);
10155}
10156
10157
/* One temperature channel, backed by three APE scratchpad offsets.
 * The SENSOR_DEVICE_ATTR index argument is handed to tg3_show_temp()
 * via attr->index as the scratchpad offset to read.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* All hwmon attributes exported for the device, NULL-terminated. */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

/* Sysfs group created/removed by tg3_hwmon_open()/tg3_hwmon_close(). */
static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10175
Michael Chanaed93e02012-07-16 16:24:02 +000010176static void tg3_hwmon_close(struct tg3 *tp)
10177{
Michael Chanaed93e02012-07-16 16:24:02 +000010178 if (tp->hwmon_dev) {
10179 hwmon_device_unregister(tp->hwmon_dev);
10180 tp->hwmon_dev = NULL;
10181 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10182 }
Michael Chanaed93e02012-07-16 16:24:02 +000010183}
10184
10185static void tg3_hwmon_open(struct tg3 *tp)
10186{
Michael Chanaed93e02012-07-16 16:24:02 +000010187 int i, err;
10188 u32 size = 0;
10189 struct pci_dev *pdev = tp->pdev;
10190 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10191
10192 tg3_sd_scan_scratchpad(tp, ocirs);
10193
10194 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10195 if (!ocirs[i].src_data_length)
10196 continue;
10197
10198 size += ocirs[i].src_hdr_length;
10199 size += ocirs[i].src_data_length;
10200 }
10201
10202 if (!size)
10203 return;
10204
10205 /* Register hwmon sysfs hooks */
10206 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10207 if (err) {
10208 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10209 return;
10210 }
10211
10212 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10213 if (IS_ERR(tp->hwmon_dev)) {
10214 tp->hwmon_dev = NULL;
10215 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10216 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10217 }
Michael Chanaed93e02012-07-16 16:24:02 +000010218}
10219
10220
/* Accumulate the 32-bit hardware counter at REG into the split-64-bit
 * software counter PSTAT ({low, high} pair).  If the low word wraps
 * during the addition (new low < value just added), carry into high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do { u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10227
/* Sample the chip's 32-bit MAC TX/RX statistics registers and fold them
 * into the 64-bit software copies in tp->hw_stats.  Called once per
 * second from tg3_timer() with tp->lock held, so the narrow hardware
 * counters are read before they can wrap more than once.  Does nothing
 * while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 workaround: once the total TX packet count exceeds the
	 * number of RDMA channels, clear the TX-length workaround bit
	 * set in tg3_reset_hw() and retire the flag.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* 5717 and 5719-A0/5720-A0 can't use the discard counter register
	 * directly; derive discards from the mbuf low-watermark attention
	 * bit instead (at most one discard recorded per tick).
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Ack the attention bit, then bump the 64-bit
			 * counter with manual carry handling.
			 */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10292
Matt Carlson0e6cf6a2011-06-13 13:38:55 +000010293static void tg3_chk_missed_msi(struct tg3 *tp)
10294{
10295 u32 i;
10296
10297 for (i = 0; i < tp->irq_cnt; i++) {
10298 struct tg3_napi *tnapi = &tp->napi[i];
10299
10300 if (tg3_has_work(tnapi)) {
10301 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10302 tnapi->last_tx_cons == tnapi->tx_cons) {
10303 if (tnapi->chk_msi_cnt < 1) {
10304 tnapi->chk_msi_cnt++;
10305 return;
10306 }
Matt Carlson7f230732011-08-31 11:44:48 +000010307 tg3_msi(0, tnapi);
Matt Carlson0e6cf6a2011-06-13 13:38:55 +000010308 }
10309 }
10310 tnapi->chk_msi_cnt = 0;
10311 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10312 tnapi->last_tx_cons = tnapi->tx_cons;
10313 }
10314}
10315
/* Periodic maintenance timer (runs at tp->timer_offset intervals; see
 * tg3_timer_init()).  Handles: missed-MSI recovery, interrupt-race
 * workarounds for non-tagged status hardware, once-per-second stats
 * fetching and link polling, and the ASF firmware heartbeat.  Always
 * re-arms itself at the end, even when skipping work because a reset
 * is pending.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Don't touch the chip while an IRQ sync or reset task is in
	 * flight; just re-arm and try again next tick.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Work is pending: force an interrupt. */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise nudge coalescing so status is refreshed. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* WDMAC disabled means the chip wedged: schedule a full
		 * reset (can't do it here; we're in timer context).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable countdown (set by the PHY code). */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll the MAC status register for PHY events. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			/* SerDes link state must be polled by hand. */
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to re-sync, but only
				 * when autoneg isn't mid-flight.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive. In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time. Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset. This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10447
Bill Pemberton229b1ad2012-12-03 09:22:59 -050010448static void tg3_timer_init(struct tg3 *tp)
Matt Carlson21f76382012-02-22 12:35:21 +000010449{
10450 if (tg3_flag(tp, TAGGED_STATUS) &&
Joe Perches41535772013-02-16 11:20:04 +000010451 tg3_asic_rev(tp) != ASIC_REV_5717 &&
Matt Carlson21f76382012-02-22 12:35:21 +000010452 !tg3_flag(tp, 57765_CLASS))
10453 tp->timer_offset = HZ;
10454 else
10455 tp->timer_offset = HZ / 10;
10456
10457 BUG_ON(tp->timer_offset > HZ);
10458
10459 tp->timer_multiplier = (HZ / tp->timer_offset);
10460 tp->asf_multiplier = (HZ / tp->timer_offset) *
10461 TG3_FW_UPDATE_FREQ_SEC;
10462
10463 init_timer(&tp->timer);
10464 tp->timer.data = (unsigned long) tp;
10465 tp->timer.function = tg3_timer;
10466}
10467
10468static void tg3_timer_start(struct tg3 *tp)
10469{
10470 tp->asf_counter = tp->asf_multiplier;
10471 tp->timer_counter = tp->timer_multiplier;
10472
10473 tp->timer.expires = jiffies + tp->timer_offset;
10474 add_timer(&tp->timer);
10475}
10476
/* Stop the maintenance timer, waiting for a concurrently-running
 * tg3_timer() to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10481
10482/* Restart hardware after configuration changes, self-test, etc.
10483 * Invoked with tp->lock held.
10484 */
10485static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10486 __releases(tp->lock)
10487 __acquires(tp->lock)
10488{
10489 int err;
10490
10491 err = tg3_init_hw(tp, reset_phy);
10492 if (err) {
10493 netdev_err(tp->dev,
10494 "Failed to re-initialize device, aborting\n");
10495 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10496 tg3_full_unlock(tp);
10497 tg3_timer_stop(tp);
10498 tp->irq_sync = 0;
10499 tg3_napi_enable(tp);
10500 dev_close(tp->dev);
10501 tg3_full_lock(tp, 0);
10502 }
10503 return err;
10504}
10505
/* Workqueue handler that performs a full chip reset outside interrupt
 * context (scheduled via tg3_reset_task_schedule(), e.g. from
 * tg3_timer() when the chip wedges).  Clears RESET_TASK_PENDING on all
 * exit paths.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed after the reset was scheduled: nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* tg3_phy_stop() may sleep, so it runs outside the lock. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* A TX timeout may indicate the mailbox write ordering
	 * workaround is needed: switch to flushed mailbox writes.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10549
Matt Carlson4f125f42009-09-01 12:55:02 +000010550static int tg3_request_irq(struct tg3 *tp, int irq_num)
Michael Chanfcfa0a32006-03-20 22:28:41 -080010551{
David Howells7d12e782006-10-05 14:55:46 +010010552 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010553 unsigned long flags;
Matt Carlson4f125f42009-09-01 12:55:02 +000010554 char *name;
10555 struct tg3_napi *tnapi = &tp->napi[irq_num];
10556
10557 if (tp->irq_cnt == 1)
10558 name = tp->dev->name;
10559 else {
10560 name = &tnapi->irq_lbl[0];
10561 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10562 name[IFNAMSIZ-1] = 0;
10563 }
Michael Chanfcfa0a32006-03-20 22:28:41 -080010564
Joe Perches63c3a662011-04-26 08:12:10 +000010565 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
Michael Chanfcfa0a32006-03-20 22:28:41 -080010566 fn = tg3_msi;
Joe Perches63c3a662011-04-26 08:12:10 +000010567 if (tg3_flag(tp, 1SHOT_MSI))
Michael Chanfcfa0a32006-03-20 22:28:41 -080010568 fn = tg3_msi_1shot;
Javier Martinez Canillasab392d22011-03-28 16:27:31 +000010569 flags = 0;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010570 } else {
10571 fn = tg3_interrupt;
Joe Perches63c3a662011-04-26 08:12:10 +000010572 if (tg3_flag(tp, TAGGED_STATUS))
Michael Chanfcfa0a32006-03-20 22:28:41 -080010573 fn = tg3_interrupt_tagged;
Javier Martinez Canillasab392d22011-03-28 16:27:31 +000010574 flags = IRQF_SHARED;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010575 }
Matt Carlson4f125f42009-09-01 12:55:02 +000010576
10577 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
Michael Chanfcfa0a32006-03-20 22:28:41 -080010578}
10579
/* Verify that the chip can actually deliver an interrupt.  Temporarily
 * swaps in tg3_test_isr on vector 0, forces a coalescing-now event and
 * polls for up to ~50ms for evidence of delivery, then restores the
 * normal handler.  Returns 0 if an interrupt was seen, -EIO if not,
 * or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so an interrupt fires immediately. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Delivery is visible either as a non-zero interrupt
		 * mailbox or as the masked-PCI-interrupt bit set by the ISR.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Re-arm by acknowledging the latest status tag. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal handler regardless of the test outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10653
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10714
Matt Carlson9e9fd122009-01-19 16:57:45 -080010715static int tg3_request_firmware(struct tg3 *tp)
10716{
Nithin Sujir77997ea2013-03-06 17:02:32 +000010717 const struct tg3_firmware_hdr *fw_hdr;
Matt Carlson9e9fd122009-01-19 16:57:45 -080010718
10719 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
Joe Perches05dbe002010-02-17 19:44:19 +000010720 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10721 tp->fw_needed);
Matt Carlson9e9fd122009-01-19 16:57:45 -080010722 return -ENOENT;
10723 }
10724
Nithin Sujir77997ea2013-03-06 17:02:32 +000010725 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
Matt Carlson9e9fd122009-01-19 16:57:45 -080010726
10727 /* Firmware blob starts with version numbers, followed by
10728 * start address and _full_ length including BSS sections
10729 * (which must be longer than the actual data, of course
10730 */
10731
Nithin Sujir77997ea2013-03-06 17:02:32 +000010732 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10733 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
Joe Perches05dbe002010-02-17 19:44:19 +000010734 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10735 tp->fw_len, tp->fw_needed);
Matt Carlson9e9fd122009-01-19 16:57:45 -080010736 release_firmware(tp->fw);
10737 tp->fw = NULL;
10738 return -EINVAL;
10739 }
10740
10741 /* We no longer need firmware; we have it. */
10742 tp->fw_needed = NULL;
10743 return 0;
10744}
10745
Michael Chan91024262012-09-28 07:12:38 +000010746static u32 tg3_irq_count(struct tg3 *tp)
Matt Carlson679563f2009-09-01 12:55:46 +000010747{
Michael Chan91024262012-09-28 07:12:38 +000010748 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
Matt Carlson679563f2009-09-01 12:55:46 +000010749
Michael Chan91024262012-09-28 07:12:38 +000010750 if (irq_cnt > 1) {
Matt Carlsonc3b50032012-01-17 15:27:23 +000010751 /* We want as many rx rings enabled as there are cpus.
10752 * In multiqueue MSI-X mode, the first MSI-X vector
10753 * only deals with link interrupts, etc, so we add
10754 * one to the number of vectors we are requesting.
10755 */
Michael Chan91024262012-09-28 07:12:38 +000010756 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
Matt Carlsonc3b50032012-01-17 15:27:23 +000010757 }
Matt Carlson679563f2009-09-01 12:55:46 +000010758
Michael Chan91024262012-09-28 07:12:38 +000010759 return irq_cnt;
10760}
10761
/* Try to switch the device to MSI-X.  Derives the rx/tx queue counts
 * from the user requests (or defaults), requests that many vectors and
 * handles a partial grant by shrinking the queue counts to fit.
 * Returns true if MSI-X is enabled, false to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Partial grant: retry with the number of vectors the
		 * system can actually give us, then shrink queue counts.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* With a single vector there is no RSS/TSS to configure. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10822
/* Select and enable the interrupt mechanism (MSI-X, MSI or INTx),
 * program MSGINT_MODE accordingly, and normalize the vector/queue
 * counts for the non-MSI-X cases.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	/* Single vector implies single rx and tx queue. */
	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10861
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear all
 * interrupt-mode and RSS/TSS flags.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
10873
/* Bring the device fully up: set up interrupts, allocate DMA-consistent
 * memory, register per-vector handlers, initialize the hardware,
 * optionally validate MSI delivery, then start the PHY, timer, PTP and
 * the TX queues.  On failure, unwinds via the err_out labels in reverse
 * order of acquisition and returns the errno.
 *
 * @reset_phy: passed through to tg3_init_hw()
 * @test_irq:  run the MSI delivery test when MSI is in use
 * @init:      true for first open (tg3_ptp_init) vs resume (tg3_ptp_resume)
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Free the vectors already requested. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		/* Enable one-shot MSI at the PCIe level on pre-57765_PLUS
		 * chips still running MSI after the test.
		 */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10989
/* Tear down everything tg3_start() set up, in reverse order: cancel the
 * reset work, stop the data path, timer, hwmon and PHY, halt the chip,
 * free rings/IRQs/NAPI contexts and the DMA-consistent memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11024
/* net_device_ops .ndo_open: load any needed firmware (adjusting the
 * EEE or TSO capability flags on failure/recovery), power the chip up,
 * and bring the device online via tg3_start().  Also registers the PTP
 * clock when the hardware supports it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* 57766: firmware gates EEE, not TSO. */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		/* Start failed: drop back to low power. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11080
/* net_device_ops .ndo_stop: unregister PTP, shut the device down,
 * reset the stats baselines and power the chip down.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
11099
11100static inline u64 get_stat64(tg3_stat64_t *val)
11101{
11102 return ((u64)val->high << 32) | ((u64)val->low);
11103}
11104
/* Return the cumulative RX CRC error count.  On 5700/5701 copper parts
 * the MAC counter is unusable, so the count is read from the PHY's test
 * register (enabling the CRC counter as a side effect) and accumulated
 * in tp->phy_crc_errors; other chips use the hardware rx_fcs_errors
 * statistic directly.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11128
/* Accumulate one hardware counter on top of the value saved at the
 * last close (tp->estats_prev), so stats survive close/open cycles.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Fill the ethtool extended statistics block from the hardware
 * statistics area plus the pre-close baselines.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA and internal queue counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11216
/* Fill the standard rtnl_link_stats64 block by mapping the hardware
 * statistics counters onto the generic fields, added on top of the
 * baselines saved at the last close (tp->net_stats_prev).
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counters, not hardware statistics. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11272
/* ethtool get_regs_len handler: the register dump produced by
 * tg3_get_regs() always has a fixed size.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11277
/* ethtool get_regs handler: dump the legacy register block into @_p
 * (TG3_REG_BLK_SIZE bytes, pre-zeroed so unread regions read as 0).
 * Registers are not touched while the chip is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Register access in low-power state is skipped; caller gets zeros. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	/* Hold the full lock so the dump is a consistent snapshot. */
	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11296
11297static int tg3_get_eeprom_len(struct net_device *dev)
11298{
11299 struct tg3 *tp = netdev_priv(dev);
11300
11301 return tp->nvram_size;
11302}
11303
/* ethtool get_eeprom handler: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is only readable in aligned 4-byte
 * big-endian words, so an unaligned head or tail is handled by reading the
 * enclosing word and copying out just the requested bytes.
 *
 * Returns 0 or a negative errno; eeprom->len is updated to the number of
 * bytes actually copied, including on a partial failure mid-read.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM cannot be accessed while the device is in low-power state. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* becomes the running count of bytes copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* copy only the requested bytes out of the aligned word */
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* report how many bytes made it before the failure */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11366
/* ethtool set_eeprom handler: write eeprom->len bytes from @data to NVRAM
 * at eeprom->offset.  NVRAM writes must be whole aligned 4-byte words, so
 * for an unaligned start or end the enclosing words are first read back
 * (read-modify-write) and the request is staged into a temporary buffer.
 *
 * Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	/* NVRAM cannot be accessed while the device is in low-power state. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	/* The caller must echo the magic back as a safety interlock. */
	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * preserve the bytes before the write window.
		 */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * preserve the bytes after the write window.
		 */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* stage the aligned image: saved head word, user data,
		 * saved tail word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	/* only free the staging buffer, never the caller's data */
	if (buf != data)
		kfree(buf);

	return ret;
}
11425
/* ethtool get_settings handler: report supported/advertised link modes,
 * current speed/duplex and PHY bookkeeping.  When phylib manages the PHY
 * (USE_PHYLIB), the query is delegated wholesale to phy_ethtool_gset().
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs also do 10/100; serdes devices report fibre only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control configuration onto the pause
		 * advertisement bits.  NOTE(review): rx-only maps to
		 * Pause|Asym_Pause and rx+tx to Pause alone — this matches
		 * the 802.3 pause-resolution encoding; confirm against the
		 * standard before altering.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		/* Link is up: report the negotiated state. */
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link (or interface down): everything is unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011491
/* ethtool set_settings handler: validate and apply the requested link
 * configuration (autoneg/speed/duplex/advertising).  With phylib
 * (USE_PHYLIB) the request is delegated to phy_ethtool_sset().  All
 * validation happens before the lock is taken; the stored configuration
 * and the PHY reprogramming happen under tg3_full_lock().
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link must name a concrete duplex. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this device can advertise ... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ... reject anything outside it ... */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* ... then keep only the speed/duplex bits of the request. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Serdes links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			/* Copper forced mode is limited to 10/100 here. */
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg decides speed/duplex; mark them unknown for now. */
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Reprogram the PHY only if the interface is up. */
	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011578
Linus Torvalds1da177e2005-04-16 15:20:36 -070011579static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11580{
11581 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011582
Rick Jones68aad782011-11-07 13:29:27 +000011583 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11584 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11585 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11586 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011587}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011588
Linus Torvalds1da177e2005-04-16 15:20:36 -070011589static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11590{
11591 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011592
Joe Perches63c3a662011-04-26 08:12:10 +000011593 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -070011594 wol->supported = WAKE_MAGIC;
11595 else
11596 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011597 wol->wolopts = 0;
Joe Perches63c3a662011-04-26 08:12:10 +000011598 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011599 wol->wolopts = WAKE_MAGIC;
11600 memset(&wol->sopass, 0, sizeof(wol->sopass));
11601}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011602
/* ethtool set_wol handler: enable or disable magic-packet Wake-on-LAN.
 * The request is recorded in the PM core (device_set_wakeup_enable) and
 * mirrored into the driver's WOL_ENABLE flag under tp->lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	/* Only magic-packet wake is supported. */
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Mirror the PM core's final decision into the driver flag. */
	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011625
Linus Torvalds1da177e2005-04-16 15:20:36 -070011626static u32 tg3_get_msglevel(struct net_device *dev)
11627{
11628 struct tg3 *tp = netdev_priv(dev);
11629 return tp->msg_enable;
11630}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011631
Linus Torvalds1da177e2005-04-16 15:20:36 -070011632static void tg3_set_msglevel(struct net_device *dev, u32 value)
11633{
11634 struct tg3 *tp = netdev_priv(dev);
11635 tp->msg_enable = value;
11636}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011637
/* ethtool nway_reset handler: restart link autonegotiation.  Delegated to
 * phylib when USE_PHYLIB; otherwise BMCR is poked directly over MDIO under
 * tp->lock.  Returns 0 on success, -EAGAIN if the interface is down or the
 * PHY is not connected, -EINVAL if autoneg cannot be restarted.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Internal serdes links have no MII autoneg to restart. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is deliberately read twice; presumably
		 * the first read clears a stale/latched value — confirm
		 * before "simplifying" this to a single read.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011671
Linus Torvalds1da177e2005-04-16 15:20:36 -070011672static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11673{
11674 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011675
Matt Carlson2c49a442010-09-30 10:34:35 +000011676 ering->rx_max_pending = tp->rx_std_ring_mask;
Joe Perches63c3a662011-04-26 08:12:10 +000011677 if (tg3_flag(tp, JUMBO_RING_ENABLE))
Matt Carlson2c49a442010-09-30 10:34:35 +000011678 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
Michael Chan4f81c322006-03-20 21:33:42 -080011679 else
11680 ering->rx_jumbo_max_pending = 0;
11681
11682 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011683
11684 ering->rx_pending = tp->rx_pending;
Joe Perches63c3a662011-04-26 08:12:10 +000011685 if (tg3_flag(tp, JUMBO_RING_ENABLE))
Michael Chan4f81c322006-03-20 21:33:42 -080011686 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11687 else
11688 ering->rx_jumbo_pending = 0;
11689
Matt Carlsonf3f3f272009-08-28 14:03:21 +000011690 ering->tx_pending = tp->napi[0].tx_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011691}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011692
/* ethtool set_ringparam handler: validate and apply new RX/jumbo/TX ring
 * sizes.  If the interface is running, traffic is stopped, the chip is
 * halted and reinitialized with the new sizes, and traffic is restarted.
 * Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX must exceed MAX_SKB_FRAGS (x3 on TSO_BUG chips) so one
	 * maximally-fragmented skb always fits in the ring.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the PHY and the datapath before reconfiguring. */
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* One TX size for every queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY outside the full lock. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011738
Linus Torvalds1da177e2005-04-16 15:20:36 -070011739static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11740{
11741 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011742
Joe Perches63c3a662011-04-26 08:12:10 +000011743 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
Matt Carlson8d018622007-12-20 20:05:44 -080011744
Matt Carlson4a2db502011-12-08 14:40:17 +000011745 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
Matt Carlson8d018622007-12-20 20:05:44 -080011746 epause->rx_pause = 1;
11747 else
11748 epause->rx_pause = 0;
11749
Matt Carlson4a2db502011-12-08 14:40:17 +000011750 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
Matt Carlson8d018622007-12-20 20:05:44 -080011751 epause->tx_pause = 1;
11752 else
11753 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011754}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011755
/* ethtool set_pauseparam handler: configure RX/TX flow control and pause
 * autonegotiation.  Two very different paths:
 *  - phylib (USE_PHYLIB): translate rx/tx pause into pause advertisement
 *    bits on the phy_device and, if needed, kick off renegotiation;
 *  - legacy: store the flow-control configuration and, if the interface
 *    is running, halt and reinitialize the chip to apply it.
 * Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause needs Asym_Pause PHY support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map rx/tx pause onto advertisement bits.  NOTE(review):
		 * rx-only -> Pause|Asym_Pause, rx+tx -> Pause, tx-only ->
		 * Asym_Pause — this matches the 802.3 pause-resolution
		 * encoding; confirm against the standard before altering.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the desired
			 * advertisement for when it is.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by full chip restart when the interface is up. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011857
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011858static int tg3_get_sset_count(struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011859{
Jeff Garzikb9f2c042007-10-03 18:07:32 -070011860 switch (sset) {
11861 case ETH_SS_TEST:
11862 return TG3_NUM_TEST;
11863 case ETH_SS_STATS:
11864 return TG3_NUM_STATS;
11865 default:
11866 return -EOPNOTSUPP;
11867 }
Michael Chan4cafd3f2005-05-29 14:56:34 -070011868}
11869
Matt Carlson90415472011-12-16 13:33:23 +000011870static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11871 u32 *rules __always_unused)
11872{
11873 struct tg3 *tp = netdev_priv(dev);
11874
11875 if (!tg3_flag(tp, SUPPORT_MSIX))
11876 return -EOPNOTSUPP;
11877
11878 switch (info->cmd) {
11879 case ETHTOOL_GRXRINGS:
11880 if (netif_running(tp->dev))
Michael Chan91024262012-09-28 07:12:38 +000011881 info->data = tp->rxq_cnt;
Matt Carlson90415472011-12-16 13:33:23 +000011882 else {
11883 info->data = num_online_cpus();
Michael Chan91024262012-09-28 07:12:38 +000011884 if (info->data > TG3_RSS_MAX_NUM_QS)
11885 info->data = TG3_RSS_MAX_NUM_QS;
Matt Carlson90415472011-12-16 13:33:23 +000011886 }
11887
11888 /* The first interrupt vector only
11889 * handles link interrupts.
11890 */
11891 info->data -= 1;
11892 return 0;
11893
11894 default:
11895 return -EOPNOTSUPP;
11896 }
11897}
11898
11899static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11900{
11901 u32 size = 0;
11902 struct tg3 *tp = netdev_priv(dev);
11903
11904 if (tg3_flag(tp, SUPPORT_MSIX))
11905 size = TG3_RSS_INDIR_TBL_SIZE;
11906
11907 return size;
11908}
11909
11910static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11911{
11912 struct tg3 *tp = netdev_priv(dev);
11913 int i;
11914
11915 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11916 indir[i] = tp->rss_ind_tbl[i];
11917
11918 return 0;
11919}
11920
11921static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11922{
11923 struct tg3 *tp = netdev_priv(dev);
11924 size_t i;
11925
11926 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11927 tp->rss_ind_tbl[i] = indir[i];
11928
11929 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11930 return 0;
11931
11932 /* It is legal to write the indirection
11933 * table while the device is running.
11934 */
11935 tg3_full_lock(tp, 0);
11936 tg3_rss_write_indir_tbl(tp);
11937 tg3_full_unlock(tp);
11938
11939 return 0;
11940}
11941
Michael Chan09681692012-09-28 07:12:42 +000011942static void tg3_get_channels(struct net_device *dev,
11943 struct ethtool_channels *channel)
11944{
11945 struct tg3 *tp = netdev_priv(dev);
11946 u32 deflt_qs = netif_get_num_default_rss_queues();
11947
11948 channel->max_rx = tp->rxq_max;
11949 channel->max_tx = tp->txq_max;
11950
11951 if (netif_running(dev)) {
11952 channel->rx_count = tp->rxq_cnt;
11953 channel->tx_count = tp->txq_cnt;
11954 } else {
11955 if (tp->rxq_req)
11956 channel->rx_count = tp->rxq_req;
11957 else
11958 channel->rx_count = min(deflt_qs, tp->rxq_max);
11959
11960 if (tp->txq_req)
11961 channel->tx_count = tp->txq_req;
11962 else
11963 channel->tx_count = min(deflt_qs, tp->txq_max);
11964 }
11965}
11966
11967static int tg3_set_channels(struct net_device *dev,
11968 struct ethtool_channels *channel)
11969{
11970 struct tg3 *tp = netdev_priv(dev);
11971
11972 if (!tg3_flag(tp, SUPPORT_MSIX))
11973 return -EOPNOTSUPP;
11974
11975 if (channel->rx_count > tp->rxq_max ||
11976 channel->tx_count > tp->txq_max)
11977 return -EINVAL;
11978
11979 tp->rxq_req = channel->rx_count;
11980 tp->txq_req = channel->tx_count;
11981
11982 if (!netif_running(dev))
11983 return 0;
11984
11985 tg3_stop(tp);
11986
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +000011987 tg3_carrier_off(tp);
Michael Chan09681692012-09-28 07:12:42 +000011988
Matt Carlsonbe947302012-12-03 19:36:57 +000011989 tg3_start(tp, true, false, false);
Michael Chan09681692012-09-28 07:12:42 +000011990
11991 return 0;
11992}
11993
Matt Carlsonde6f31e2010-04-12 06:58:30 +000011994static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011995{
11996 switch (stringset) {
11997 case ETH_SS_STATS:
11998 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11999 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -070012000 case ETH_SS_TEST:
12001 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12002 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012003 default:
12004 WARN_ON(1); /* we need a WARN() */
12005 break;
12006 }
12007}
12008
stephen hemminger81b87092011-04-04 08:43:50 +000012009static int tg3_set_phys_id(struct net_device *dev,
12010 enum ethtool_phys_id_state state)
Michael Chan4009a932005-09-05 17:52:54 -070012011{
12012 struct tg3 *tp = netdev_priv(dev);
Michael Chan4009a932005-09-05 17:52:54 -070012013
12014 if (!netif_running(tp->dev))
12015 return -EAGAIN;
12016
stephen hemminger81b87092011-04-04 08:43:50 +000012017 switch (state) {
12018 case ETHTOOL_ID_ACTIVE:
Allan, Bruce Wfce55922011-04-13 13:09:10 +000012019 return 1; /* cycle on/off once per second */
Michael Chan4009a932005-09-05 17:52:54 -070012020
stephen hemminger81b87092011-04-04 08:43:50 +000012021 case ETHTOOL_ID_ON:
12022 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12023 LED_CTRL_1000MBPS_ON |
12024 LED_CTRL_100MBPS_ON |
12025 LED_CTRL_10MBPS_ON |
12026 LED_CTRL_TRAFFIC_OVERRIDE |
12027 LED_CTRL_TRAFFIC_BLINK |
12028 LED_CTRL_TRAFFIC_LED);
12029 break;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012030
stephen hemminger81b87092011-04-04 08:43:50 +000012031 case ETHTOOL_ID_OFF:
12032 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12033 LED_CTRL_TRAFFIC_OVERRIDE);
12034 break;
Michael Chan4009a932005-09-05 17:52:54 -070012035
stephen hemminger81b87092011-04-04 08:43:50 +000012036 case ETHTOOL_ID_INACTIVE:
12037 tw32(MAC_LED_CTRL, tp->led_ctrl);
12038 break;
Michael Chan4009a932005-09-05 17:52:54 -070012039 }
stephen hemminger81b87092011-04-04 08:43:50 +000012040
Michael Chan4009a932005-09-05 17:52:54 -070012041 return 0;
12042}
12043
Matt Carlsonde6f31e2010-04-12 06:58:30 +000012044static void tg3_get_ethtool_stats(struct net_device *dev,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012045 struct ethtool_stats *estats, u64 *tmp_stats)
12046{
12047 struct tg3 *tp = netdev_priv(dev);
Matt Carlson0e6c9da2011-12-08 14:40:13 +000012048
Matt Carlsonb546e462012-02-13 15:20:09 +000012049 if (tp->hw_stats)
12050 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12051 else
12052 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
Linus Torvalds1da177e2005-04-16 15:20:36 -070012053}
12054
Matt Carlson535a4902011-07-20 10:20:56 +000012055static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
Matt Carlsonc3e94502011-04-13 11:05:08 +000012056{
12057 int i;
12058 __be32 *buf;
12059 u32 offset = 0, len = 0;
12060 u32 magic, val;
12061
Joe Perches63c3a662011-04-26 08:12:10 +000012062 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
Matt Carlsonc3e94502011-04-13 11:05:08 +000012063 return NULL;
12064
12065 if (magic == TG3_EEPROM_MAGIC) {
12066 for (offset = TG3_NVM_DIR_START;
12067 offset < TG3_NVM_DIR_END;
12068 offset += TG3_NVM_DIRENT_SIZE) {
12069 if (tg3_nvram_read(tp, offset, &val))
12070 return NULL;
12071
12072 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12073 TG3_NVM_DIRTYPE_EXTVPD)
12074 break;
12075 }
12076
12077 if (offset != TG3_NVM_DIR_END) {
12078 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12079 if (tg3_nvram_read(tp, offset + 4, &offset))
12080 return NULL;
12081
12082 offset = tg3_nvram_logical_addr(tp, offset);
12083 }
12084 }
12085
12086 if (!offset || !len) {
12087 offset = TG3_NVM_VPD_OFF;
12088 len = TG3_NVM_VPD_LEN;
12089 }
12090
12091 buf = kmalloc(len, GFP_KERNEL);
12092 if (buf == NULL)
12093 return NULL;
12094
12095 if (magic == TG3_EEPROM_MAGIC) {
12096 for (i = 0; i < len; i += 4) {
12097 /* The data is in little-endian format in NVRAM.
12098 * Use the big-endian read routines to preserve
12099 * the byte order as it exists in NVRAM.
12100 */
12101 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12102 goto error;
12103 }
12104 } else {
12105 u8 *ptr;
12106 ssize_t cnt;
12107 unsigned int pos = 0;
12108
12109 ptr = (u8 *)&buf[0];
12110 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12111 cnt = pci_read_vpd(tp->pdev, pos,
12112 len - pos, ptr);
12113 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12114 cnt = 0;
12115 else if (cnt < 0)
12116 goto error;
12117 }
12118 if (pos != len)
12119 goto error;
12120 }
12121
Matt Carlson535a4902011-07-20 10:20:56 +000012122 *vpdlen = len;
12123
Matt Carlsonc3e94502011-04-13 11:05:08 +000012124 return buf;
12125
12126error:
12127 kfree(buf);
12128 return NULL;
12129}
12130
Michael Chan566f86a2005-05-29 14:56:58 -070012131#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -080012132#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12133#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12134#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Matt Carlson727a6d92011-06-13 13:38:58 +000012135#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12136#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
Matt Carlsonbda18fa2011-07-20 10:20:57 +000012137#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
Michael Chanb16250e2006-09-27 16:10:14 -070012138#define NVRAM_SELFBOOT_HW_SIZE 0x20
12139#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -070012140
12141static int tg3_test_nvram(struct tg3 *tp)
12142{
Matt Carlson535a4902011-07-20 10:20:56 +000012143 u32 csum, magic, len;
Matt Carlsona9dc5292009-02-25 14:25:30 +000012144 __be32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +010012145 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -070012146
Joe Perches63c3a662011-04-26 08:12:10 +000012147 if (tg3_flag(tp, NO_NVRAM))
Matt Carlsondf259d82009-04-20 06:57:14 +000012148 return 0;
12149
Matt Carlsone4f34112009-02-25 14:25:00 +000012150 if (tg3_nvram_read(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080012151 return -EIO;
12152
Michael Chan1b277772006-03-20 22:27:48 -080012153 if (magic == TG3_EEPROM_MAGIC)
12154 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -070012155 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -080012156 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12157 TG3_EEPROM_SB_FORMAT_1) {
12158 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12159 case TG3_EEPROM_SB_REVISION_0:
12160 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12161 break;
12162 case TG3_EEPROM_SB_REVISION_2:
12163 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12164 break;
12165 case TG3_EEPROM_SB_REVISION_3:
12166 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12167 break;
Matt Carlson727a6d92011-06-13 13:38:58 +000012168 case TG3_EEPROM_SB_REVISION_4:
12169 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12170 break;
12171 case TG3_EEPROM_SB_REVISION_5:
12172 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12173 break;
12174 case TG3_EEPROM_SB_REVISION_6:
12175 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12176 break;
Matt Carlsona5767de2007-11-12 21:10:58 -080012177 default:
Matt Carlson727a6d92011-06-13 13:38:58 +000012178 return -EIO;
Matt Carlsona5767de2007-11-12 21:10:58 -080012179 }
12180 } else
Michael Chan1b277772006-03-20 22:27:48 -080012181 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -070012182 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12183 size = NVRAM_SELFBOOT_HW_SIZE;
12184 else
Michael Chan1b277772006-03-20 22:27:48 -080012185 return -EIO;
12186
12187 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -070012188 if (buf == NULL)
12189 return -ENOMEM;
12190
Michael Chan1b277772006-03-20 22:27:48 -080012191 err = -EIO;
12192 for (i = 0, j = 0; i < size; i += 4, j++) {
Matt Carlsona9dc5292009-02-25 14:25:30 +000012193 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12194 if (err)
Michael Chan566f86a2005-05-29 14:56:58 -070012195 break;
Michael Chan566f86a2005-05-29 14:56:58 -070012196 }
Michael Chan1b277772006-03-20 22:27:48 -080012197 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -070012198 goto out;
12199
Michael Chan1b277772006-03-20 22:27:48 -080012200 /* Selfboot format */
Matt Carlsona9dc5292009-02-25 14:25:30 +000012201 magic = be32_to_cpu(buf[0]);
Al Virob9fc7dc2007-12-17 22:59:57 -080012202 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -070012203 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -080012204 u8 *buf8 = (u8 *) buf, csum8 = 0;
12205
Al Virob9fc7dc2007-12-17 22:59:57 -080012206 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -080012207 TG3_EEPROM_SB_REVISION_2) {
12208 /* For rev 2, the csum doesn't include the MBA. */
12209 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12210 csum8 += buf8[i];
12211 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12212 csum8 += buf8[i];
12213 } else {
12214 for (i = 0; i < size; i++)
12215 csum8 += buf8[i];
12216 }
Michael Chan1b277772006-03-20 22:27:48 -080012217
Adrian Bunkad96b482006-04-05 22:21:04 -070012218 if (csum8 == 0) {
12219 err = 0;
12220 goto out;
12221 }
12222
12223 err = -EIO;
12224 goto out;
Michael Chan1b277772006-03-20 22:27:48 -080012225 }
Michael Chan566f86a2005-05-29 14:56:58 -070012226
Al Virob9fc7dc2007-12-17 22:59:57 -080012227 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -070012228 TG3_EEPROM_MAGIC_HW) {
12229 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
Matt Carlsona9dc5292009-02-25 14:25:30 +000012230 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
Michael Chanb16250e2006-09-27 16:10:14 -070012231 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -070012232
12233 /* Separate the parity bits and the data bytes. */
12234 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12235 if ((i == 0) || (i == 8)) {
12236 int l;
12237 u8 msk;
12238
12239 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12240 parity[k++] = buf8[i] & msk;
12241 i++;
Matt Carlson859a588792010-04-05 10:19:28 +000012242 } else if (i == 16) {
Michael Chanb16250e2006-09-27 16:10:14 -070012243 int l;
12244 u8 msk;
12245
12246 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12247 parity[k++] = buf8[i] & msk;
12248 i++;
12249
12250 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12251 parity[k++] = buf8[i] & msk;
12252 i++;
12253 }
12254 data[j++] = buf8[i];
12255 }
12256
12257 err = -EIO;
12258 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12259 u8 hw8 = hweight8(data[i]);
12260
12261 if ((hw8 & 0x1) && parity[i])
12262 goto out;
12263 else if (!(hw8 & 0x1) && !parity[i])
12264 goto out;
12265 }
12266 err = 0;
12267 goto out;
12268 }
12269
Matt Carlson01c3a392011-03-09 16:58:20 +000012270 err = -EIO;
12271
Michael Chan566f86a2005-05-29 14:56:58 -070012272 /* Bootstrap checksum at offset 0x10 */
12273 csum = calc_crc((unsigned char *) buf, 0x10);
Matt Carlson01c3a392011-03-09 16:58:20 +000012274 if (csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -070012275 goto out;
12276
12277 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12278 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Matt Carlson01c3a392011-03-09 16:58:20 +000012279 if (csum != le32_to_cpu(buf[0xfc/4]))
Matt Carlsona9dc5292009-02-25 14:25:30 +000012280 goto out;
Michael Chan566f86a2005-05-29 14:56:58 -070012281
Matt Carlsonc3e94502011-04-13 11:05:08 +000012282 kfree(buf);
12283
Matt Carlson535a4902011-07-20 10:20:56 +000012284 buf = tg3_vpd_readblock(tp, &len);
Matt Carlsonc3e94502011-04-13 11:05:08 +000012285 if (!buf)
12286 return -ENOMEM;
Matt Carlsond4894f32011-03-09 16:58:21 +000012287
Matt Carlson535a4902011-07-20 10:20:56 +000012288 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
Matt Carlsond4894f32011-03-09 16:58:21 +000012289 if (i > 0) {
12290 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12291 if (j < 0)
12292 goto out;
12293
Matt Carlson535a4902011-07-20 10:20:56 +000012294 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
Matt Carlsond4894f32011-03-09 16:58:21 +000012295 goto out;
12296
12297 i += PCI_VPD_LRDT_TAG_SIZE;
12298 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12299 PCI_VPD_RO_KEYWORD_CHKSUM);
12300 if (j > 0) {
12301 u8 csum8 = 0;
12302
12303 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12304
12305 for (i = 0; i <= j; i++)
12306 csum8 += ((u8 *)buf)[i];
12307
12308 if (csum8)
12309 goto out;
12310 }
12311 }
12312
Michael Chan566f86a2005-05-29 14:56:58 -070012313 err = 0;
12314
12315out:
12316 kfree(buf);
12317 return err;
12318}
12319
Michael Chanca430072005-05-29 14:57:23 -070012320#define TG3_SERDES_TIMEOUT_SEC 2
12321#define TG3_COPPER_TIMEOUT_SEC 6
12322
12323static int tg3_test_link(struct tg3 *tp)
12324{
12325 int i, max;
12326
12327 if (!netif_running(tp->dev))
12328 return -ENODEV;
12329
Matt Carlsonf07e9af2010-08-02 11:26:07 +000012330 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -070012331 max = TG3_SERDES_TIMEOUT_SEC;
12332 else
12333 max = TG3_COPPER_TIMEOUT_SEC;
12334
12335 for (i = 0; i < max; i++) {
Nithin Nayak Sujirf4a46d12012-11-14 14:44:27 +000012336 if (tp->link_up)
Michael Chanca430072005-05-29 14:57:23 -070012337 return 0;
12338
12339 if (msleep_interruptible(1000))
12340 break;
12341 }
12342
12343 return -EIO;
12344}
12345
Michael Chana71116d2005-05-29 14:58:11 -070012346/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -080012347static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -070012348{
Michael Chanb16250e2006-09-27 16:10:14 -070012349 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -070012350 u32 offset, read_mask, write_mask, val, save_val, read_val;
12351 static struct {
12352 u16 offset;
12353 u16 flags;
12354#define TG3_FL_5705 0x1
12355#define TG3_FL_NOT_5705 0x2
12356#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -070012357#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -070012358 u32 read_mask;
12359 u32 write_mask;
12360 } reg_tbl[] = {
12361 /* MAC Control Registers */
12362 { MAC_MODE, TG3_FL_NOT_5705,
12363 0x00000000, 0x00ef6f8c },
12364 { MAC_MODE, TG3_FL_5705,
12365 0x00000000, 0x01ef6b8c },
12366 { MAC_STATUS, TG3_FL_NOT_5705,
12367 0x03800107, 0x00000000 },
12368 { MAC_STATUS, TG3_FL_5705,
12369 0x03800100, 0x00000000 },
12370 { MAC_ADDR_0_HIGH, 0x0000,
12371 0x00000000, 0x0000ffff },
12372 { MAC_ADDR_0_LOW, 0x0000,
Matt Carlsonc6cdf432010-04-05 10:19:26 +000012373 0x00000000, 0xffffffff },
Michael Chana71116d2005-05-29 14:58:11 -070012374 { MAC_RX_MTU_SIZE, 0x0000,
12375 0x00000000, 0x0000ffff },
12376 { MAC_TX_MODE, 0x0000,
12377 0x00000000, 0x00000070 },
12378 { MAC_TX_LENGTHS, 0x0000,
12379 0x00000000, 0x00003fff },
12380 { MAC_RX_MODE, TG3_FL_NOT_5705,
12381 0x00000000, 0x000007fc },
12382 { MAC_RX_MODE, TG3_FL_5705,
12383 0x00000000, 0x000007dc },
12384 { MAC_HASH_REG_0, 0x0000,
12385 0x00000000, 0xffffffff },
12386 { MAC_HASH_REG_1, 0x0000,
12387 0x00000000, 0xffffffff },
12388 { MAC_HASH_REG_2, 0x0000,
12389 0x00000000, 0xffffffff },
12390 { MAC_HASH_REG_3, 0x0000,
12391 0x00000000, 0xffffffff },
12392
12393 /* Receive Data and Receive BD Initiator Control Registers. */
12394 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12395 0x00000000, 0xffffffff },
12396 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12397 0x00000000, 0xffffffff },
12398 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12399 0x00000000, 0x00000003 },
12400 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12401 0x00000000, 0xffffffff },
12402 { RCVDBDI_STD_BD+0, 0x0000,
12403 0x00000000, 0xffffffff },
12404 { RCVDBDI_STD_BD+4, 0x0000,
12405 0x00000000, 0xffffffff },
12406 { RCVDBDI_STD_BD+8, 0x0000,
12407 0x00000000, 0xffff0002 },
12408 { RCVDBDI_STD_BD+0xc, 0x0000,
12409 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012410
Michael Chana71116d2005-05-29 14:58:11 -070012411 /* Receive BD Initiator Control Registers. */
12412 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12413 0x00000000, 0xffffffff },
12414 { RCVBDI_STD_THRESH, TG3_FL_5705,
12415 0x00000000, 0x000003ff },
12416 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12417 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012418
Michael Chana71116d2005-05-29 14:58:11 -070012419 /* Host Coalescing Control Registers. */
12420 { HOSTCC_MODE, TG3_FL_NOT_5705,
12421 0x00000000, 0x00000004 },
12422 { HOSTCC_MODE, TG3_FL_5705,
12423 0x00000000, 0x000000f6 },
12424 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12425 0x00000000, 0xffffffff },
12426 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12427 0x00000000, 0x000003ff },
12428 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12429 0x00000000, 0xffffffff },
12430 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12431 0x00000000, 0x000003ff },
12432 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12433 0x00000000, 0xffffffff },
12434 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12435 0x00000000, 0x000000ff },
12436 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12437 0x00000000, 0xffffffff },
12438 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12439 0x00000000, 0x000000ff },
12440 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12441 0x00000000, 0xffffffff },
12442 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12443 0x00000000, 0xffffffff },
12444 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12445 0x00000000, 0xffffffff },
12446 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12447 0x00000000, 0x000000ff },
12448 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12449 0x00000000, 0xffffffff },
12450 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12451 0x00000000, 0x000000ff },
12452 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12453 0x00000000, 0xffffffff },
12454 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12455 0x00000000, 0xffffffff },
12456 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12457 0x00000000, 0xffffffff },
12458 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12459 0x00000000, 0xffffffff },
12460 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12461 0x00000000, 0xffffffff },
12462 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12463 0xffffffff, 0x00000000 },
12464 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12465 0xffffffff, 0x00000000 },
12466
12467 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -070012468 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -070012469 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -070012470 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -070012471 0x00000000, 0x007fffff },
12472 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12473 0x00000000, 0x0000003f },
12474 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12475 0x00000000, 0x000001ff },
12476 { BUFMGR_MB_HIGH_WATER, 0x0000,
12477 0x00000000, 0x000001ff },
12478 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12479 0xffffffff, 0x00000000 },
12480 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12481 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012482
Michael Chana71116d2005-05-29 14:58:11 -070012483 /* Mailbox Registers */
12484 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12485 0x00000000, 0x000001ff },
12486 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12487 0x00000000, 0x000001ff },
12488 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12489 0x00000000, 0x000007ff },
12490 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12491 0x00000000, 0x000001ff },
12492
12493 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12494 };
12495
Michael Chanb16250e2006-09-27 16:10:14 -070012496 is_5705 = is_5750 = 0;
Joe Perches63c3a662011-04-26 08:12:10 +000012497 if (tg3_flag(tp, 5705_PLUS)) {
Michael Chana71116d2005-05-29 14:58:11 -070012498 is_5705 = 1;
Joe Perches63c3a662011-04-26 08:12:10 +000012499 if (tg3_flag(tp, 5750_PLUS))
Michael Chanb16250e2006-09-27 16:10:14 -070012500 is_5750 = 1;
12501 }
Michael Chana71116d2005-05-29 14:58:11 -070012502
12503 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12504 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12505 continue;
12506
12507 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12508 continue;
12509
Joe Perches63c3a662011-04-26 08:12:10 +000012510 if (tg3_flag(tp, IS_5788) &&
Michael Chana71116d2005-05-29 14:58:11 -070012511 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12512 continue;
12513
Michael Chanb16250e2006-09-27 16:10:14 -070012514 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12515 continue;
12516
Michael Chana71116d2005-05-29 14:58:11 -070012517 offset = (u32) reg_tbl[i].offset;
12518 read_mask = reg_tbl[i].read_mask;
12519 write_mask = reg_tbl[i].write_mask;
12520
12521 /* Save the original register content */
12522 save_val = tr32(offset);
12523
12524 /* Determine the read-only value. */
12525 read_val = save_val & read_mask;
12526
12527 /* Write zero to the register, then make sure the read-only bits
12528 * are not changed and the read/write bits are all zeros.
12529 */
12530 tw32(offset, 0);
12531
12532 val = tr32(offset);
12533
12534 /* Test the read-only and read/write bits. */
12535 if (((val & read_mask) != read_val) || (val & write_mask))
12536 goto out;
12537
12538 /* Write ones to all the bits defined by RdMask and WrMask, then
12539 * make sure the read-only bits are not changed and the
12540 * read/write bits are all ones.
12541 */
12542 tw32(offset, read_mask | write_mask);
12543
12544 val = tr32(offset);
12545
12546 /* Test the read-only bits. */
12547 if ((val & read_mask) != read_val)
12548 goto out;
12549
12550 /* Test the read/write bits. */
12551 if ((val & write_mask) != write_mask)
12552 goto out;
12553
12554 tw32(offset, save_val);
12555 }
12556
12557 return 0;
12558
12559out:
Michael Chan9f88f292006-12-07 00:22:54 -080012560 if (netif_msg_hw(tp))
Matt Carlson2445e462010-04-05 10:19:21 +000012561 netdev_err(tp->dev,
12562 "Register test failed at offset %x\n", offset);
Michael Chana71116d2005-05-29 14:58:11 -070012563 tw32(offset, save_val);
12564 return -EIO;
12565}
12566
Michael Chan7942e1d2005-05-29 14:58:36 -070012567static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12568{
Arjan van de Venf71e1302006-03-03 21:33:57 -050012569 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -070012570 int i;
12571 u32 j;
12572
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +020012573 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -070012574 for (j = 0; j < len; j += 4) {
12575 u32 val;
12576
12577 tg3_write_mem(tp, offset + j, test_pattern[i]);
12578 tg3_read_mem(tp, offset + j, &val);
12579 if (val != test_pattern[i])
12580 return -EIO;
12581 }
12582 }
12583 return 0;
12584}
12585
/* Ethtool self-test: pattern-test the on-chip memory regions.  The
 * region table is chosen by ASIC generation; each table is terminated
 * by a 0xffffffff sentinel offset.  Returns 0 on success or the first
 * error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-ASIC-family tables of { region offset, region length }. */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the table for this chip; order matters, newest first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12655
Matt Carlsonbb158d62011-04-25 12:42:47 +000012656#define TG3_TSO_MSS 500
12657
12658#define TG3_TSO_IP_HDR_LEN 20
12659#define TG3_TSO_TCP_HDR_LEN 20
12660#define TG3_TSO_TCP_OPT_LEN 12
12661
12662static const u8 tg3_tso_header[] = {
126630x08, 0x00,
126640x45, 0x00, 0x00, 0x00,
126650x00, 0x00, 0x40, 0x00,
126660x40, 0x06, 0x00, 0x00,
126670x0a, 0x00, 0x00, 0x01,
126680x0a, 0x00, 0x00, 0x02,
126690x0d, 0x00, 0xe0, 0x00,
126700x00, 0x00, 0x01, 0x00,
126710x00, 0x00, 0x02, 0x00,
126720x80, 0x10, 0x10, 0x00,
126730x14, 0x09, 0x00, 0x00,
126740x01, 0x01, 0x08, 0x0a,
126750x11, 0x11, 0x11, 0x11,
126760x11, 0x11, 0x11, 0x11,
12677};
Michael Chan9f40dea2005-09-05 17:53:06 -070012678
Matt Carlson28a45952011-08-19 13:58:22 +000012679static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
Michael Chanc76949a2005-05-29 14:58:59 -070012680{
Matt Carlson5e5a7f32011-08-19 13:58:21 +000012681 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012682 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
Matt Carlson84b67b22011-07-27 14:20:52 +000012683 u32 budget;
Eric Dumazet9205fd92011-11-18 06:47:01 +000012684 struct sk_buff *skb;
12685 u8 *tx_data, *rx_data;
Michael Chanc76949a2005-05-29 14:58:59 -070012686 dma_addr_t map;
12687 int num_pkts, tx_len, rx_len, i, err;
12688 struct tg3_rx_buffer_desc *desc;
Matt Carlson898a56f2009-08-28 14:02:40 +000012689 struct tg3_napi *tnapi, *rnapi;
Matt Carlson8fea32b2010-09-15 08:59:58 +000012690 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
Michael Chanc76949a2005-05-29 14:58:59 -070012691
Matt Carlsonc8873402010-02-12 14:47:11 +000012692 tnapi = &tp->napi[0];
12693 rnapi = &tp->napi[0];
Matt Carlson0c1d0e22009-09-01 13:16:33 +000012694 if (tp->irq_cnt > 1) {
Joe Perches63c3a662011-04-26 08:12:10 +000012695 if (tg3_flag(tp, ENABLE_RSS))
Matt Carlson1da85aa2010-09-30 10:34:34 +000012696 rnapi = &tp->napi[1];
Joe Perches63c3a662011-04-26 08:12:10 +000012697 if (tg3_flag(tp, ENABLE_TSS))
Matt Carlsonc8873402010-02-12 14:47:11 +000012698 tnapi = &tp->napi[1];
Matt Carlson0c1d0e22009-09-01 13:16:33 +000012699 }
Matt Carlsonfd2ce372009-09-01 12:51:13 +000012700 coal_now = tnapi->coal_now | rnapi->coal_now;
Matt Carlson898a56f2009-08-28 14:02:40 +000012701
Michael Chanc76949a2005-05-29 14:58:59 -070012702 err = -EIO;
12703
Matt Carlson4852a862011-04-13 11:05:07 +000012704 tx_len = pktsz;
David S. Millera20e9c62006-07-31 22:38:16 -070012705 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070012706 if (!skb)
12707 return -ENOMEM;
12708
Michael Chanc76949a2005-05-29 14:58:59 -070012709 tx_data = skb_put(skb, tx_len);
12710 memcpy(tx_data, tp->dev->dev_addr, 6);
12711 memset(tx_data + 6, 0x0, 8);
12712
Matt Carlson4852a862011-04-13 11:05:07 +000012713 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
Michael Chanc76949a2005-05-29 14:58:59 -070012714
Matt Carlson28a45952011-08-19 13:58:22 +000012715 if (tso_loopback) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012716 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12717
12718 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12719 TG3_TSO_TCP_OPT_LEN;
12720
12721 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12722 sizeof(tg3_tso_header));
12723 mss = TG3_TSO_MSS;
12724
12725 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12726 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12727
12728 /* Set the total length field in the IP header */
12729 iph->tot_len = htons((u16)(mss + hdr_len));
12730
12731 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12732 TXD_FLAG_CPU_POST_DMA);
12733
Joe Perches63c3a662011-04-26 08:12:10 +000012734 if (tg3_flag(tp, HW_TSO_1) ||
12735 tg3_flag(tp, HW_TSO_2) ||
12736 tg3_flag(tp, HW_TSO_3)) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012737 struct tcphdr *th;
12738 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12739 th = (struct tcphdr *)&tx_data[val];
12740 th->check = 0;
12741 } else
12742 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12743
Joe Perches63c3a662011-04-26 08:12:10 +000012744 if (tg3_flag(tp, HW_TSO_3)) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012745 mss |= (hdr_len & 0xc) << 12;
12746 if (hdr_len & 0x10)
12747 base_flags |= 0x00000010;
12748 base_flags |= (hdr_len & 0x3e0) << 5;
Joe Perches63c3a662011-04-26 08:12:10 +000012749 } else if (tg3_flag(tp, HW_TSO_2))
Matt Carlsonbb158d62011-04-25 12:42:47 +000012750 mss |= hdr_len << 9;
Joe Perches63c3a662011-04-26 08:12:10 +000012751 else if (tg3_flag(tp, HW_TSO_1) ||
Joe Perches41535772013-02-16 11:20:04 +000012752 tg3_asic_rev(tp) == ASIC_REV_5705) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012753 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12754 } else {
12755 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12756 }
12757
12758 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12759 } else {
12760 num_pkts = 1;
12761 data_off = ETH_HLEN;
Michael Chanc441b452012-03-04 14:48:13 +000012762
12763 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12764 tx_len > VLAN_ETH_FRAME_LEN)
12765 base_flags |= TXD_FLAG_JMB_PKT;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012766 }
12767
12768 for (i = data_off; i < tx_len; i++)
Michael Chanc76949a2005-05-29 14:58:59 -070012769 tx_data[i] = (u8) (i & 0xff);
12770
Alexander Duyckf4188d82009-12-02 16:48:38 +000012771 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12772 if (pci_dma_mapping_error(tp->pdev, map)) {
Matt Carlsona21771d2009-11-02 14:25:31 +000012773 dev_kfree_skb(skb);
12774 return -EIO;
12775 }
Michael Chanc76949a2005-05-29 14:58:59 -070012776
Matt Carlson0d681b22011-07-27 14:20:49 +000012777 val = tnapi->tx_prod;
12778 tnapi->tx_buffers[val].skb = skb;
12779 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12780
Michael Chanc76949a2005-05-29 14:58:59 -070012781 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
Matt Carlsonfd2ce372009-09-01 12:51:13 +000012782 rnapi->coal_now);
Michael Chanc76949a2005-05-29 14:58:59 -070012783
12784 udelay(10);
12785
Matt Carlson898a56f2009-08-28 14:02:40 +000012786 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
Michael Chanc76949a2005-05-29 14:58:59 -070012787
Matt Carlson84b67b22011-07-27 14:20:52 +000012788 budget = tg3_tx_avail(tnapi);
12789 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
Matt Carlsond1a3b732011-07-27 14:20:51 +000012790 base_flags | TXD_FLAG_END, mss, 0)) {
12791 tnapi->tx_buffers[val].skb = NULL;
12792 dev_kfree_skb(skb);
12793 return -EIO;
12794 }
Michael Chanc76949a2005-05-29 14:58:59 -070012795
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012796 tnapi->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070012797
Michael Chan6541b802012-03-04 14:48:14 +000012798 /* Sync BD data before updating mailbox */
12799 wmb();
12800
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012801 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12802 tr32_mailbox(tnapi->prodmbox);
Michael Chanc76949a2005-05-29 14:58:59 -070012803
12804 udelay(10);
12805
Matt Carlson303fc922009-11-02 14:27:34 +000012806 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12807 for (i = 0; i < 35; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070012808 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
Matt Carlsonfd2ce372009-09-01 12:51:13 +000012809 coal_now);
Michael Chanc76949a2005-05-29 14:58:59 -070012810
12811 udelay(10);
12812
Matt Carlson898a56f2009-08-28 14:02:40 +000012813 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12814 rx_idx = rnapi->hw_status->idx[0].rx_producer;
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012815 if ((tx_idx == tnapi->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070012816 (rx_idx == (rx_start_idx + num_pkts)))
12817 break;
12818 }
12819
Matt Carlsonba1142e2011-11-04 09:15:00 +000012820 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
Michael Chanc76949a2005-05-29 14:58:59 -070012821 dev_kfree_skb(skb);
12822
Matt Carlsonf3f3f272009-08-28 14:03:21 +000012823 if (tx_idx != tnapi->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070012824 goto out;
12825
12826 if (rx_idx != rx_start_idx + num_pkts)
12827 goto out;
12828
Matt Carlsonbb158d62011-04-25 12:42:47 +000012829 val = data_off;
12830 while (rx_idx != rx_start_idx) {
12831 desc = &rnapi->rx_rcb[rx_start_idx++];
12832 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12833 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
Michael Chanc76949a2005-05-29 14:58:59 -070012834
Matt Carlsonbb158d62011-04-25 12:42:47 +000012835 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12836 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
Matt Carlson4852a862011-04-13 11:05:07 +000012837 goto out;
Michael Chanc76949a2005-05-29 14:58:59 -070012838
Matt Carlsonbb158d62011-04-25 12:42:47 +000012839 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12840 - ETH_FCS_LEN;
12841
Matt Carlson28a45952011-08-19 13:58:22 +000012842 if (!tso_loopback) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012843 if (rx_len != tx_len)
12844 goto out;
12845
12846 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12847 if (opaque_key != RXD_OPAQUE_RING_STD)
12848 goto out;
12849 } else {
12850 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12851 goto out;
12852 }
12853 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12854 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
Matt Carlson54e0a672011-05-19 12:12:50 +000012855 >> RXD_TCPCSUM_SHIFT != 0xffff) {
Matt Carlsonbb158d62011-04-25 12:42:47 +000012856 goto out;
12857 }
12858
12859 if (opaque_key == RXD_OPAQUE_RING_STD) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000012860 rx_data = tpr->rx_std_buffers[desc_idx].data;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012861 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12862 mapping);
12863 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000012864 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
Matt Carlsonbb158d62011-04-25 12:42:47 +000012865 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12866 mapping);
12867 } else
Matt Carlson4852a862011-04-13 11:05:07 +000012868 goto out;
12869
Matt Carlsonbb158d62011-04-25 12:42:47 +000012870 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12871 PCI_DMA_FROMDEVICE);
12872
Eric Dumazet9205fd92011-11-18 06:47:01 +000012873 rx_data += TG3_RX_OFFSET(tp);
Matt Carlsonbb158d62011-04-25 12:42:47 +000012874 for (i = data_off; i < rx_len; i++, val++) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000012875 if (*(rx_data + i) != (u8) (val & 0xff))
Matt Carlsonbb158d62011-04-25 12:42:47 +000012876 goto out;
12877 }
Matt Carlson4852a862011-04-13 11:05:07 +000012878 }
12879
Michael Chanc76949a2005-05-29 14:58:59 -070012880 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012881
Eric Dumazet9205fd92011-11-18 06:47:01 +000012882 /* tg3_free_rings will unmap and free the rx_data */
Michael Chanc76949a2005-05-29 14:58:59 -070012883out:
12884 return err;
12885}
12886
/* Per-mode loopback failure bits OR'ed into the ethtool self-test
 * results array by tg3_test_loopback():
 *   STD - standard-MTU frame loopback failed
 *   JMB - jumbo frame loopback failed
 *   TSO - TSO (segmentation) loopback failed
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
/* All of the above - used to mark a loopback class entirely failed. */
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
Matt Carlson00c266b2011-04-25 12:42:46 +000012894
/* Run the loopback portion of the ethtool self-test.
 *
 * @data is the ethtool results array; the TG3_MAC_LOOPB_TEST,
 * TG3_PHY_LOOPB_TEST and TG3_EXT_LOOPB_TEST slots are OR'ed with
 * TG3_*_LOOPBACK_FAILED bits (caller is expected to have zeroed the
 * array - this function only ORs bits in).
 * @do_extlpbk requests the additional external-cable loopback pass.
 *
 * Returns 0 if every attempted loopback passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to the device's DMA limit, if any. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* EEE interferes with loopback; mask the capability for the
	 * duration of the test and restore it at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		/* Could not bring the hardware up - fail every class. */
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata. Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY (and optionally external) loopback - only when driving the
	 * PHY directly, i.e. not serdes and not via phylib.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec. Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any bit recorded in any class means the overall test failed. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13009
/* ethtool .self_test entry point.
 *
 * Fills @data (TG3_NUM_TEST slots) with per-test results (non-zero =
 * failed) and sets ETH_TEST_FL_FAILED in @etest->flags on any failure.
 * Online tests (NVRAM, link) always run; the offline battery (register,
 * memory, loopback, interrupt) runs only when ETH_TEST_FL_OFFLINE is
 * requested and requires halting and restarting the device.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* If we cannot power the chip up, report everything failed. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* Skip the link test for external loopback - a cable plug is
	 * expected there, not a live link partner.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only unlock if we took the NVRAM lock successfully. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test must run without tp's full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if we stopped it and the hardware
		 * restart succeeded.
		 */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
13096
/* SIOCSHWTSTAMP handler: configure hardware TX/RX packet timestamping.
 *
 * Copies a struct hwtstamp_config from userspace, programs the TX
 * timestamp enable flag and the RX PTP filter control value
 * (tp->rxptpctl), then copies the (unmodified) config back to
 * userspace as the ioctl contract requires.
 *
 * Returns 0 on success, -EINVAL if the device lacks PTP support or
 * reserved flags are set, -EFAULT on bad user pointers, -ERANGE for
 * unsupported tx_type/rx_filter values.
 */
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	/* Reserved field - must be zero. */
	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	/* Map each supported hwtstamp RX filter onto the chip's
	 * TG3_RX_PTP_CTL_* control bits.
	 */
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	/* Program the filter into hardware only while the device is up;
	 * otherwise it is applied from the cached tp->rxptpctl later.
	 */
	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13186
/* ndo_do_ioctl hook: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG) plus hardware timestamp configuration (SIOCSHWTSTAMP).
 *
 * When the PHY is managed by phylib, all MII ioctls are delegated to
 * phy_mii_ioctl(). Serdes devices have no MII PHY and return
 * -EOPNOTSUPP via the fallthrough break.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes access to the MDIO interface. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13248
David S. Miller15f98502005-05-18 22:49:26 -070013249static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13250{
13251 struct tg3 *tp = netdev_priv(dev);
13252
13253 memcpy(ec, &tp->coal, sizeof(*ec));
13254 return 0;
13255}
13256
/* ethtool .set_coalesce hook: validate and apply interrupt coalescing
 * parameters.
 *
 * Only the parameters explicitly copied below are honored; all other
 * fields of @ec are ignored. Returns -EINVAL when a value exceeds the
 * hardware maximum or when both the usec and frame-count triggers for
 * a direction are zero (which would disable its interrupts entirely).
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* The irq-tick and stats-block limits only exist on pre-5705
	 * hardware; on 5705+ they stay zero, which forces those ec
	 * fields to be zero as well via the range checks below.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Push the new values into hardware immediately if the
	 * interface is up; otherwise they take effect on next open.
	 */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
13310
/* ethtool operations table for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13345
David S. Millerb4017c52012-03-01 17:57:40 -050013346static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13347 struct rtnl_link_stats64 *stats)
13348{
13349 struct tg3 *tp = netdev_priv(dev);
13350
David S. Millerb4017c52012-03-01 17:57:40 -050013351 spin_lock_bh(&tp->lock);
Michael Chan0f566b22012-07-29 19:15:44 +000013352 if (!tp->hw_stats) {
13353 spin_unlock_bh(&tp->lock);
13354 return &tp->net_stats_prev;
13355 }
13356
David S. Millerb4017c52012-03-01 17:57:40 -050013357 tg3_get_nstats(tp, stats);
13358 spin_unlock_bh(&tp->lock);
13359
13360 return stats;
13361}
13362
/* ndo_set_rx_mode hook: reprogram the RX filters under the full lock.
 * A no-op while the interface is down; the mode is applied on open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13374
Matt Carlsonfaf16272012-02-13 10:20:07 +000013375static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13376 int new_mtu)
13377{
13378 dev->mtu = new_mtu;
13379
13380 if (new_mtu > ETH_DATA_LEN) {
13381 if (tg3_flag(tp, 5780_CLASS)) {
13382 netdev_update_features(dev);
13383 tg3_flag_clear(tp, TSO_CAPABLE);
13384 } else {
13385 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13386 }
13387 } else {
13388 if (tg3_flag(tp, 5780_CLASS)) {
13389 tg3_flag_set(tp, TSO_CAPABLE);
13390 netdev_update_features(dev);
13391 }
13392 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13393 }
13394}
13395
/* ndo_change_mtu hook: validate @new_mtu and apply it.
 *
 * If the interface is down, just record the new MTU; it takes effect
 * on the next open. If it is up, the device must be fully stopped,
 * reconfigured and restarted, since the MTU affects ring/DMA setup.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* tg3_phy_start must run without the full lock held. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13440
/* net_device operations table for tg3 devices. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13458
Bill Pemberton229b1ad2012-12-03 09:22:59 -050013459static void tg3_get_eeprom_size(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013460{
Michael Chan1b277772006-03-20 22:27:48 -080013461 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013462
13463 tp->nvram_size = EEPROM_CHIP_SIZE;
13464
Matt Carlsone4f34112009-02-25 14:25:00 +000013465 if (tg3_nvram_read(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013466 return;
13467
Michael Chanb16250e2006-09-27 16:10:14 -070013468 if ((magic != TG3_EEPROM_MAGIC) &&
13469 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13470 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070013471 return;
13472
13473 /*
13474 * Size the chip by reading offsets at increasing powers of two.
13475 * When we encounter our validation signature, we know the addressing
13476 * has wrapped around, and thus have our chip size.
13477 */
Michael Chan1b277772006-03-20 22:27:48 -080013478 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013479
13480 while (cursize < tp->nvram_size) {
Matt Carlsone4f34112009-02-25 14:25:00 +000013481 if (tg3_nvram_read(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013482 return;
13483
Michael Chan18201802006-03-20 22:29:15 -080013484 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013485 break;
13486
13487 cursize <<= 1;
13488 }
13489
13490 tp->nvram_size = cursize;
13491}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040013492
Bill Pemberton229b1ad2012-12-03 09:22:59 -050013493static void tg3_get_nvram_size(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013494{
13495 u32 val;
13496
Joe Perches63c3a662011-04-26 08:12:10 +000013497 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080013498 return;
13499
13500 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080013501 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080013502 tg3_get_eeprom_size(tp);
13503 return;
13504 }
13505
Matt Carlson6d348f22009-02-25 14:25:52 +000013506 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013507 if (val != 0) {
Matt Carlson6d348f22009-02-25 14:25:52 +000013508 /* This is confusing. We want to operate on the
13509 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13510 * call will read from NVRAM and byteswap the data
13511 * according to the byteswapping settings for all
13512 * other register accesses. This ensures the data we
13513 * want will always reside in the lower 16-bits.
13514 * However, the data in NVRAM is in LE format, which
13515 * means the data from the NVRAM read will always be
13516 * opposite the endianness of the CPU. The 16-bit
13517 * byteswap then brings the data to CPU endianness.
13518 */
13519 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013520 return;
13521 }
13522 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070013523 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013524}
13525
/* Decode NVRAM_CFG1 for older (pre-5752-style) devices: detect whether a
 * flash interface is present, and set the JEDEC vendor, page size and
 * buffered-ness accordingly.  Only 5750/5780-class parts carry a vendor
 * field; everything else defaults to a buffered ATMEL AT45DB0X1B.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: drop compat-bypass so EEPROM-style
		 * accesses work, and write the config back.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
		/* NOTE(review): unknown vendor codes fall through with
		 * jedecnum/pagesize unchanged — presumably unreachable on
		 * supported parts.
		 */
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
13576
Bill Pemberton229b1ad2012-12-03 09:22:59 -050013577static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
Matt Carlsona1b950d2009-09-01 13:20:17 +000013578{
13579 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13580 case FLASH_5752PAGE_SIZE_256:
13581 tp->nvram_pagesize = 256;
13582 break;
13583 case FLASH_5752PAGE_SIZE_512:
13584 tp->nvram_pagesize = 512;
13585 break;
13586 case FLASH_5752PAGE_SIZE_1K:
13587 tp->nvram_pagesize = 1024;
13588 break;
13589 case FLASH_5752PAGE_SIZE_2K:
13590 tp->nvram_pagesize = 2048;
13591 break;
13592 case FLASH_5752PAGE_SIZE_4K:
13593 tp->nvram_pagesize = 4096;
13594 break;
13595 case FLASH_5752PAGE_SIZE_264:
13596 tp->nvram_pagesize = 264;
13597 break;
13598 case FLASH_5752PAGE_SIZE_528:
13599 tp->nvram_pagesize = 528;
13600 break;
13601 }
13602}
13603
/* Decode NVRAM_CFG1 for 5752 devices: TPM protection bit, JEDEC vendor,
 * buffered/flash flags, and the page size (from the config for flash
 * parts, or the maximum EEPROM size otherwise).
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear compat-bypass for EEPROM-style access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13644
/* Decode NVRAM_CFG1 for 5755 devices.  Besides vendor/flags/page size,
 * the NVRAM size is derived from the exact vendor code; when the TPM
 * protection bit is set, a smaller usable size is reported.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13700
/* Decode NVRAM_CFG1 for 5787-style devices (also used for 5784/5785 via
 * tg3_nvram_init): set JEDEC vendor, buffered/flash flags and page size.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compat-bypass and write it back. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13738
/* Decode NVRAM_CFG1 for 5761 devices.  When TPM protection is on, the
 * usable size comes from the NVRAM_ADDR_LOCKOUT register; otherwise it is
 * derived from the exact vendor code.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here need no address translation. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13813
Bill Pemberton229b1ad2012-12-03 09:22:59 -050013814static void tg3_get_5906_nvram_info(struct tg3 *tp)
Michael Chanb5d37722006-09-27 16:06:21 -070013815{
13816 tp->nvram_jedecnum = JEDEC_ATMEL;
Joe Perches63c3a662011-04-26 08:12:10 +000013817 tg3_flag_set(tp, NVRAM_BUFFERED);
Michael Chanb5d37722006-09-27 16:06:21 -070013818 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13819}
13820
/* Decode NVRAM_CFG1 for 57780-style devices (also used for the 57765
 * class via tg3_nvram_init).  Unknown vendor codes mark the device as
 * having no NVRAM at all.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compat-bypass and return early. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13892
13893
/* Decode NVRAM_CFG1 for 5717/5719 devices.  Some vendor codes leave the
 * size unset here so it can be detected later; unknown codes mark the
 * device as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compat-bypass and return early. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13971
/* Decode NVRAM_CFG1 for 5720/5762 devices.
 *
 * On 5762, a zero vendor field means no NVRAM; its EEPROM codes are
 * remapped to the 5720 equivalents before the shared decode below.  After
 * decoding, 5762 additionally validates the NVRAM magic at offset 0 and
 * falls back to NO_NVRAM when neither signature matches.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Map 5762 EEPROM codes onto the 5720 ones. */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* 5762 sizes are detected later, not defaulted. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* 5762 sizes are detected later, not defaulted. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* 5762: require a valid NVRAM signature at offset 0. */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14112
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe and initialize NVRAM/EEPROM access for the device: reset the
 * EEPROM state machine, enable seeprom access, then dispatch to the
 * per-ASIC decode routine under the NVRAM lock.  SSB cores and 5700/5701
 * have no NVRAM interface and are handled specially.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM address FSM and program the default clock. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Zero means: let the decode routine (or the fallback
		 * below) determine the size.
		 */
		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: no NVRAM interface; size the EEPROM directly. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14187
/* Maps a board's PCI subsystem vendor/device ID pair to the PHY ID
 * hard-wired on that board.  Used as a fallback when no valid EEPROM
 * signature is found (see tg3_lookup_by_subsys()/tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14192
/* Hard-coded subsystem-ID -> PHY-ID table for boards whose EEPROM
 * carries no usable PHY information.  A phy_id of 0 appears to mark
 * boards without a copper PHY (SX/fiber variants) — NOTE(review):
 * confirm against tg3_phy_probe()'s serdes handling.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14256
Bill Pemberton229b1ad2012-12-03 09:22:59 -050014257static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070014258{
14259 int i;
14260
14261 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14262 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14263 tp->pdev->subsystem_vendor) &&
14264 (subsys_id_to_phy_id[i].subsys_devid ==
14265 tp->pdev->subsystem_device))
14266 return &subsys_id_to_phy_id[i];
14267 }
14268 return NULL;
14269}
14270
/* Parse the bootcode/EEPROM hardware configuration out of NIC SRAM and
 * translate it into driver flags: PHY ID, LED mode, write-protect,
 * ASF/APE firmware presence, WOL capability and serdes options.
 * Falls back to safe defaults when no valid SRAM signature is found.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 keeps its config in the VCPU shadow register rather than
	 * NIC SRAM, so it is handled entirely here and jumps to done.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer ASICs with a sane (nonzero,
		 * sub-0x100) bootcode version word.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Re-pack the two SRAM PHY ID halves into the driver's
		 * internal PHY ID layout (same shifts used for the
		 * MII_PHYSID1/2 registers in tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards wire their LED as PHY_2
		 * regardless of what the SRAM config says.
		 */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Arima boards 0x205a/0x2063 set the WP bit but
			 * must stay writable.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards without fiber-WOL support cannot wake. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14468
/* Read one 32-bit word from the APE OTP region.
 *
 * @offset: OTP word index; scaled by 8 to form the engine address
 *          (scale factor per APE OTP addressing — NOTE(review): confirm
 *          against the APE register spec).
 * @val:    filled with the word read on success.
 *
 * Holds the NVRAM arbitration lock for the whole transaction.
 * Returns 0 on success, the tg3_nvram_lock() error, or -EBUSY if the
 * OTP engine never reported command completion.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);	/* read back to flush the write */
	udelay(10);

	/* Poll for completion, up to 100 x 10us = ~1ms. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Always disable the OTP engine again before dropping the lock. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14501
/* Issue @cmd to the chip's OTP controller and busy-wait for completion.
 * The command is written once with the START bit and once without —
 * required kick sequence for this controller (NOTE(review): per the
 * original code; confirm against the chip manual).
 * Returns 0 on success, -EBUSY if the command did not finish in ~1ms.
 */
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
14520
/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 when any OTP command times out (0 therefore doubles as the
 * "no config" sentinel for callers).
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word forms the upper 16 bits; high half
	 * of the second word forms the lower 16 bits.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14550
Bill Pemberton229b1ad2012-12-03 09:22:59 -050014551static void tg3_phy_init_link_config(struct tg3 *tp)
Matt Carlsone256f8a2011-03-09 16:58:24 +000014552{
Hiroaki SHIMODA202ff1c2011-11-22 04:05:41 +000014553 u32 adv = ADVERTISED_Autoneg;
Matt Carlsone256f8a2011-03-09 16:58:24 +000014554
14555 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14556 adv |= ADVERTISED_1000baseT_Half |
14557 ADVERTISED_1000baseT_Full;
14558
14559 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14560 adv |= ADVERTISED_100baseT_Half |
14561 ADVERTISED_100baseT_Full |
14562 ADVERTISED_10baseT_Half |
14563 ADVERTISED_10baseT_Full |
14564 ADVERTISED_TP;
14565 else
14566 adv |= ADVERTISED_FIBRE;
14567
14568 tp->link_config.advertising = adv;
Matt Carlsone7405222012-02-13 15:20:16 +000014569 tp->link_config.speed = SPEED_UNKNOWN;
14570 tp->link_config.duplex = DUPLEX_UNKNOWN;
Matt Carlsone256f8a2011-03-09 16:58:24 +000014571 tp->link_config.autoneg = AUTONEG_ENABLE;
Matt Carlsone7405222012-02-13 15:20:16 +000014572 tp->link_config.active_speed = SPEED_UNKNOWN;
14573 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
Matt Carlson34655ad2012-02-22 12:35:18 +000014574
14575 tp->old_link = -1;
Matt Carlsone256f8a2011-03-09 16:58:24 +000014576}
14577
/* Probe and identify the PHY attached to this NIC.
 *
 * Determines tp->phy_id by, in order: reading MII_PHYSID1/2 from the
 * chip (skipped when ASF/APE firmware owns the PHY), falling back to
 * the value tg3_get_eeprom_hw_cfg() found in EEPROM, and finally the
 * hard-coded subsystem-ID table.  Also sets default flow control,
 * the per-function APE PHY lock, EEE capability, and the initial
 * link_config; may reset and restart autonegotiation on copper PHYs.
 *
 * Returns 0 on success, -ENODEV if the PHY cannot be identified, or
 * an error from PHY reset/DSP init.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function arbitrates PHY access with APE firmware
	 * through its own lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID layout (matches the packing used in
		 * tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Copper PHYs on these ASICs (with chip-rev exceptions) are
	 * Energy Efficient Ethernet capable.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched; read twice and skip the
		 * reset when the link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Re-program advertisement and restart autoneg only if
		 * the PHY's current config doesn't already match.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* DSP init is run twice on 5401 — NOTE(review): appears
		 * deliberate (hardware quirk), confirm before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14716
Bill Pemberton229b1ad2012-12-03 09:22:59 -050014717static void tg3_read_vpd(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070014718{
Matt Carlsona4a8bb12010-09-15 09:00:00 +000014719 u8 *vpd_data;
Matt Carlson4181b2c2010-02-26 14:04:45 +000014720 unsigned int block_end, rosize, len;
Matt Carlson535a4902011-07-20 10:20:56 +000014721 u32 vpdlen;
Matt Carlson184b8902010-04-05 10:19:25 +000014722 int j, i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014723
Matt Carlson535a4902011-07-20 10:20:56 +000014724 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
Matt Carlsona4a8bb12010-09-15 09:00:00 +000014725 if (!vpd_data)
14726 goto out_no_vpd;
Linus Torvalds1da177e2005-04-16 15:20:36 -070014727
Matt Carlson535a4902011-07-20 10:20:56 +000014728 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
Matt Carlson4181b2c2010-02-26 14:04:45 +000014729 if (i < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070014730 goto out_not_found;
Matt Carlson4181b2c2010-02-26 14:04:45 +000014731
14732 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14733 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14734 i += PCI_VPD_LRDT_TAG_SIZE;
14735
Matt Carlson535a4902011-07-20 10:20:56 +000014736 if (block_end > vpdlen)
Matt Carlson4181b2c2010-02-26 14:04:45 +000014737 goto out_not_found;
14738
Matt Carlson184b8902010-04-05 10:19:25 +000014739 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14740 PCI_VPD_RO_KEYWORD_MFR_ID);
14741 if (j > 0) {
14742 len = pci_vpd_info_field_size(&vpd_data[j]);
14743
14744 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14745 if (j + len > block_end || len != 4 ||
14746 memcmp(&vpd_data[j], "1028", 4))
14747 goto partno;
14748
14749 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14750 PCI_VPD_RO_KEYWORD_VENDOR0);
14751 if (j < 0)
14752 goto partno;
14753
14754 len = pci_vpd_info_field_size(&vpd_data[j]);
14755
14756 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14757 if (j + len > block_end)
14758 goto partno;
14759
14760 memcpy(tp->fw_ver, &vpd_data[j], len);
Matt Carlson535a4902011-07-20 10:20:56 +000014761 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
Matt Carlson184b8902010-04-05 10:19:25 +000014762 }
14763
14764partno:
Matt Carlson4181b2c2010-02-26 14:04:45 +000014765 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14766 PCI_VPD_RO_KEYWORD_PARTNO);
14767 if (i < 0)
14768 goto out_not_found;
14769
14770 len = pci_vpd_info_field_size(&vpd_data[i]);
14771
14772 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14773 if (len > TG3_BPN_SIZE ||
Matt Carlson535a4902011-07-20 10:20:56 +000014774 (len + i) > vpdlen)
Matt Carlson4181b2c2010-02-26 14:04:45 +000014775 goto out_not_found;
14776
14777 memcpy(tp->board_part_number, &vpd_data[i], len);
14778
Linus Torvalds1da177e2005-04-16 15:20:36 -070014779out_not_found:
Matt Carlsona4a8bb12010-09-15 09:00:00 +000014780 kfree(vpd_data);
Matt Carlson37a949c2010-09-30 10:34:33 +000014781 if (tp->board_part_number[0])
Matt Carlsona4a8bb12010-09-15 09:00:00 +000014782 return;
14783
14784out_no_vpd:
Joe Perches41535772013-02-16 11:20:04 +000014785 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
Michael Chan79d49692012-11-05 14:26:29 +000014786 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
Matt Carlson37a949c2010-09-30 10:34:33 +000014788 strcpy(tp->board_part_number, "BCM5717");
14789 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14790 strcpy(tp->board_part_number, "BCM5718");
14791 else
14792 goto nomatch;
Joe Perches41535772013-02-16 11:20:04 +000014793 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
Matt Carlson37a949c2010-09-30 10:34:33 +000014794 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14795 strcpy(tp->board_part_number, "BCM57780");
14796 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14797 strcpy(tp->board_part_number, "BCM57760");
14798 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14799 strcpy(tp->board_part_number, "BCM57790");
14800 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14801 strcpy(tp->board_part_number, "BCM57788");
14802 else
14803 goto nomatch;
Joe Perches41535772013-02-16 11:20:04 +000014804 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
Matt Carlson37a949c2010-09-30 10:34:33 +000014805 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14806 strcpy(tp->board_part_number, "BCM57761");
14807 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14808 strcpy(tp->board_part_number, "BCM57765");
14809 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14810 strcpy(tp->board_part_number, "BCM57781");
14811 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14812 strcpy(tp->board_part_number, "BCM57785");
14813 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14814 strcpy(tp->board_part_number, "BCM57791");
14815 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14816 strcpy(tp->board_part_number, "BCM57795");
14817 else
14818 goto nomatch;
Joe Perches41535772013-02-16 11:20:04 +000014819 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
Matt Carlson55086ad2011-12-14 11:09:59 +000014820 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14821 strcpy(tp->board_part_number, "BCM57762");
14822 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14823 strcpy(tp->board_part_number, "BCM57766");
14824 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14825 strcpy(tp->board_part_number, "BCM57782");
14826 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14827 strcpy(tp->board_part_number, "BCM57786");
14828 else
14829 goto nomatch;
Joe Perches41535772013-02-16 11:20:04 +000014830 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
Michael Chanb5d37722006-09-27 16:06:21 -070014831 strcpy(tp->board_part_number, "BCM95906");
Matt Carlson37a949c2010-09-30 10:34:33 +000014832 } else {
14833nomatch:
Michael Chanb5d37722006-09-27 16:06:21 -070014834 strcpy(tp->board_part_number, "none");
Matt Carlson37a949c2010-09-30 10:34:33 +000014835 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070014836}
14837
Bill Pemberton229b1ad2012-12-03 09:22:59 -050014838static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
Matt Carlson9c8a6202007-10-21 16:16:08 -070014839{
14840 u32 val;
14841
Matt Carlsone4f34112009-02-25 14:25:00 +000014842 if (tg3_nvram_read(tp, offset, &val) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014843 (val & 0xfc000000) != 0x0c000000 ||
Matt Carlsone4f34112009-02-25 14:25:00 +000014844 tg3_nvram_read(tp, offset + 4, &val) ||
Matt Carlson9c8a6202007-10-21 16:16:08 -070014845 val != 0)
14846 return 0;
14847
14848 return 1;
14849}
14850
/* Read the bootcode version from NVRAM and append it to tp->fw_ver.
 *
 * NVRAM offset 0xc holds the bootcode image pointer and offset 0x4 the
 * image load address (used to translate in-image pointers).  Two
 * layouts are handled:
 *  - "new" images (first word signature 0x0c000000, second word zero)
 *    embed a 16-byte version string located via the word at +8;
 *  - older images carry a packed major/minor word at the fixed
 *    TG3_NVM_PTREV_BCVER location, formatted as "vM.mm".
 * Any NVRAM read failure aborts silently, leaving fw_ver unchanged.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Detect the "new" bootcode image layout by its signature. */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever version text is already present. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte embedded string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image load address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14902
Bill Pemberton229b1ad2012-12-03 09:22:59 -050014903static void tg3_read_hwsb_ver(struct tg3 *tp)
Matt Carlsona6f6cb12009-02-25 14:27:43 +000014904{
14905 u32 val, major, minor;
14906
14907 /* Use native endian representation */
14908 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14909 return;
14910
14911 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14912 TG3_NVM_HWSB_CFG1_MAJSFT;
14913 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14914 TG3_NVM_HWSB_CFG1_MINSFT;
14915
14916 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14917}
14918
/* Decode the self-boot (SB) firmware version from EEPROM format word
 * @val and append it to tp->fw_ver as "sb vM.mm" plus an optional
 * trailing build letter.  Only format 1 is understood; the EEPROM
 * revision selects where the packed build/major/minor word lives.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Per-revision location of the edh (build/major/minor) word. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor is two digits, build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* A non-zero build number appends a lowercase letter. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
14973
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its 16-byte version string to tp->fw_ver, prefixed by ", ".
 * Bails out silently on any NVRAM read error, on a missing ASF entry,
 * or when the image fails validation.
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; later chips store it
	 * in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* Translate the in-image version pointer to an NVRAM offset. */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 bytes (4 words) of version string, truncating
	 * at the end of the fw_ver buffer if necessary.
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			/* Partial copy of the final word, then stop. */
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15025
Bill Pemberton229b1ad2012-12-03 09:22:59 -050015026static void tg3_probe_ncsi(struct tg3 *tp)
Matt Carlson7fd76442009-02-25 14:27:20 +000015027{
Matt Carlson7fd76442009-02-25 14:27:20 +000015028 u32 apedata;
Matt Carlson7fd76442009-02-25 14:27:20 +000015029
15030 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15031 if (apedata != APE_SEG_SIG_MAGIC)
15032 return;
15033
15034 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15035 if (!(apedata & APE_FW_STATUS_READY))
15036 return;
15037
Michael Chan165f4d12012-07-16 16:23:59 +000015038 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15039 tg3_flag_set(tp, APE_HAS_NCSI);
15040}
15041
Bill Pemberton229b1ad2012-12-03 09:22:59 -050015042static void tg3_read_dash_ver(struct tg3 *tp)
Michael Chan165f4d12012-07-16 16:23:59 +000015043{
15044 int vlen;
15045 u32 apedata;
15046 char *fwtype;
15047
Matt Carlson7fd76442009-02-25 14:27:20 +000015048 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15049
Michael Chan165f4d12012-07-16 16:23:59 +000015050 if (tg3_flag(tp, APE_HAS_NCSI))
Matt Carlsonecc79642010-08-02 11:26:01 +000015051 fwtype = "NCSI";
Michael Chanc86a8562013-01-06 12:51:08 +000015052 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15053 fwtype = "SMASH";
Michael Chan165f4d12012-07-16 16:23:59 +000015054 else
Matt Carlsonecc79642010-08-02 11:26:01 +000015055 fwtype = "DASH";
15056
Matt Carlson7fd76442009-02-25 14:27:20 +000015057 vlen = strlen(tp->fw_ver);
15058
Matt Carlsonecc79642010-08-02 11:26:01 +000015059 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15060 fwtype,
Matt Carlson7fd76442009-02-25 14:27:20 +000015061 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15062 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15063 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15064 (apedata & APE_FW_VERSION_BLDMSK));
15065}
15066
/* For 5762-class chips only, read a version byte from the OTP magic
 * area and append it to tp->fw_ver as " .NN".
 */
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		/* Scan up to 7 bytes from the least-significant end;
		 * the version is the last non-zero byte seen before a
		 * zero byte (or the 7th byte).
		 */
		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
15091
/* Assemble the complete firmware version string in tp->fw_ver.
 *
 * A VPD-derived version may already be present (tracked by vpd_vers).
 * NVRAM-less devices just get "sb" plus any OTP version.  Otherwise
 * the bootcode format is detected from NVRAM word 0 (EEPROM magic,
 * self-boot, or hardware self-boot) and, when ASF/APE management
 * firmware is enabled, its version is appended as well.
 */
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* Dispatch on the NVRAM signature word. */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	/* Guarantee NUL termination whatever was appended above. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
15128
Matt Carlson7cb32cf2010-09-30 10:34:36 +000015129static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15130{
Joe Perches63c3a662011-04-26 08:12:10 +000015131 if (tg3_flag(tp, LRG_PROD_RING_CAP))
Matt Carlsonde9f5232011-04-05 14:22:43 +000015132 return TG3_RX_RET_MAX_SIZE_5717;
Joe Perches63c3a662011-04-26 08:12:10 +000015133 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
Matt Carlsonde9f5232011-04-05 14:22:43 +000015134 return TG3_RX_RET_MAX_SIZE_5700;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000015135 else
Matt Carlsonde9f5232011-04-05 14:22:43 +000015136 return TG3_RX_RET_MAX_SIZE_5705;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000015137}
15138
/* Host bridges known to reorder PCI writes.  Presumably consulted
 * elsewhere (outside this view) to enable a write-reordering
 * workaround when one of these devices is present -- TODO confirm
 * against the caller.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
15145
/* Find the sibling PCI function of a dual-port device.
 *
 * Scans all eight functions in the same slot and returns the first
 * device that is not tp->pdev; falls back to tp->pdev itself when the
 * chip is configured single-port.  The returned pointer is NOT
 * reference-elevated -- see the comment before the final put.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		/* Break BEFORE the put so a found peer keeps the
		 * reference taken by pci_get_slot() until the final
		 * pci_dev_put() below.
		 */
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15173
/* Derive tp->pci_chip_rev_id from @misc_ctrl_reg -- or from the
 * alternate product-ID config registers on newer chips -- and set the
 * chip-family capability flags (CPMU_PRESENT, 5717_PLUS, 57765_CLASS,
 * 57765_PLUS, 5755_PLUS, 5780_CLASS, 5750_PLUS, 5705_PLUS) that the
 * rest of the driver keys off.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Select which product-ID register holds the real
		 * chip revision for this device ID.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Treat 5717 C0 the same as 5720 A0. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15258
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000015259static bool tg3_10_100_only_device(struct tg3 *tp,
15260 const struct pci_device_id *ent)
15261{
15262 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15263
Joe Perches41535772013-02-16 11:20:04 +000015264 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15265 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000015266 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15267 return true;
15268
15269 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
Joe Perches41535772013-02-16 11:20:04 +000015270 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000015271 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15272 return true;
15273 } else {
15274 return true;
15275 }
15276 }
15277
15278 return false;
15279}
15280
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +000015281static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -070015282{
Linus Torvalds1da177e2005-04-16 15:20:36 -070015283 u32 misc_ctrl_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015284 u32 pci_state_reg, grc_misc_cfg;
15285 u32 val;
15286 u16 pci_cmd;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080015287 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015288
Linus Torvalds1da177e2005-04-16 15:20:36 -070015289 /* Force memory write invalidate off. If we leave it on,
15290 * then on 5700_BX chips we have to enable a workaround.
15291 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15292 * to match the cacheline size. The Broadcom driver have this
15293 * workaround but turns MWI off all the times so never uses
15294 * it. This seems to suggest that the workaround is insufficient.
15295 */
15296 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15297 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15298 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15299
Matt Carlson16821282011-07-13 09:27:28 +000015300 /* Important! -- Make sure register accesses are byteswapped
15301 * correctly. Also, for those chips that require it, make
15302 * sure that indirect register accesses are enabled before
15303 * the first operation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070015304 */
15305 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15306 &misc_ctrl_reg);
Matt Carlson16821282011-07-13 09:27:28 +000015307 tp->misc_host_ctrl |= (misc_ctrl_reg &
15308 MISC_HOST_CTRL_CHIPREV);
15309 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15310 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015311
Matt Carlson42b123b2012-02-13 15:20:13 +000015312 tg3_detect_asic_rev(tp, misc_ctrl_reg);
Michael Chanff645be2005-04-21 17:09:53 -070015313
Michael Chan68929142005-08-09 20:17:14 -070015314 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15315 * we need to disable memory and use config. cycles
15316 * only to access all registers. The 5702/03 chips
15317 * can mistakenly decode the special cycles from the
15318 * ICH chipsets as memory write cycles, causing corruption
15319 * of register and memory space. Only certain ICH bridges
15320 * will drive special cycles with non-zero data during the
15321 * address phase which can fall within the 5703's address
15322 * range. This is not an ICH bug as the PCI spec allows
15323 * non-zero address during special cycles. However, only
15324 * these ICH bridges are known to drive non-zero addresses
15325 * during special cycles.
15326 *
15327 * Since special cycles do not cross PCI bridges, we only
15328 * enable this workaround if the 5703 is on the secondary
15329 * bus of these ICH bridges.
15330 */
Joe Perches41535772013-02-16 11:20:04 +000015331 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15332 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
Michael Chan68929142005-08-09 20:17:14 -070015333 static struct tg3_dev_id {
15334 u32 vendor;
15335 u32 device;
15336 u32 rev;
15337 } ich_chipsets[] = {
15338 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15339 PCI_ANY_ID },
15340 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15341 PCI_ANY_ID },
15342 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15343 0xa },
15344 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15345 PCI_ANY_ID },
15346 { },
15347 };
15348 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15349 struct pci_dev *bridge = NULL;
15350
15351 while (pci_id->vendor != 0) {
15352 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15353 bridge);
15354 if (!bridge) {
15355 pci_id++;
15356 continue;
15357 }
15358 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070015359 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070015360 continue;
15361 }
15362 if (bridge->subordinate &&
15363 (bridge->subordinate->number ==
15364 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000015365 tg3_flag_set(tp, ICH_WORKAROUND);
Michael Chan68929142005-08-09 20:17:14 -070015366 pci_dev_put(bridge);
15367 break;
15368 }
15369 }
15370 }
15371
Joe Perches41535772013-02-16 11:20:04 +000015372 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
Matt Carlson41588ba2008-04-19 18:12:33 -070015373 static struct tg3_dev_id {
15374 u32 vendor;
15375 u32 device;
15376 } bridge_chipsets[] = {
15377 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15378 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15379 { },
15380 };
15381 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15382 struct pci_dev *bridge = NULL;
15383
15384 while (pci_id->vendor != 0) {
15385 bridge = pci_get_device(pci_id->vendor,
15386 pci_id->device,
15387 bridge);
15388 if (!bridge) {
15389 pci_id++;
15390 continue;
15391 }
15392 if (bridge->subordinate &&
15393 (bridge->subordinate->number <=
15394 tp->pdev->bus->number) &&
Yinghai Lub918c622012-05-17 18:51:11 -070015395 (bridge->subordinate->busn_res.end >=
Matt Carlson41588ba2008-04-19 18:12:33 -070015396 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000015397 tg3_flag_set(tp, 5701_DMA_BUG);
Matt Carlson41588ba2008-04-19 18:12:33 -070015398 pci_dev_put(bridge);
15399 break;
15400 }
15401 }
15402 }
15403
Michael Chan4a29cc22006-03-19 13:21:12 -080015404 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15405 * DMA addresses > 40-bit. This bridge may have other additional
15406 * 57xx devices behind it in some 4-port NIC designs for example.
15407 * Any tg3 device found behind the bridge will also need the 40-bit
15408 * DMA workaround.
15409 */
Matt Carlson42b123b2012-02-13 15:20:13 +000015410 if (tg3_flag(tp, 5780_CLASS)) {
Joe Perches63c3a662011-04-26 08:12:10 +000015411 tg3_flag_set(tp, 40BIT_DMA_BUG);
Michael Chan4cf78e42005-07-25 12:29:19 -070015412 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Matt Carlson859a588792010-04-05 10:19:28 +000015413 } else {
Michael Chan4a29cc22006-03-19 13:21:12 -080015414 struct pci_dev *bridge = NULL;
15415
15416 do {
15417 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15418 PCI_DEVICE_ID_SERVERWORKS_EPB,
15419 bridge);
15420 if (bridge && bridge->subordinate &&
15421 (bridge->subordinate->number <=
15422 tp->pdev->bus->number) &&
Yinghai Lub918c622012-05-17 18:51:11 -070015423 (bridge->subordinate->busn_res.end >=
Michael Chan4a29cc22006-03-19 13:21:12 -080015424 tp->pdev->bus->number)) {
Joe Perches63c3a662011-04-26 08:12:10 +000015425 tg3_flag_set(tp, 40BIT_DMA_BUG);
Michael Chan4a29cc22006-03-19 13:21:12 -080015426 pci_dev_put(bridge);
15427 break;
15428 }
15429 } while (bridge);
15430 }
Michael Chan4cf78e42005-07-25 12:29:19 -070015431
Joe Perches41535772013-02-16 11:20:04 +000015432 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15433 tg3_asic_rev(tp) == ASIC_REV_5714)
Michael Chan7544b092007-05-05 13:08:32 -070015434 tp->pdev_peer = tg3_find_peer(tp);
15435
Matt Carlson507399f2009-11-13 13:03:37 +000015436 /* Determine TSO capabilities */
Joe Perches41535772013-02-16 11:20:04 +000015437 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
Matt Carlson4d163b72011-01-25 15:58:48 +000015438 ; /* Do nothing. HW bug. */
Joe Perches63c3a662011-04-26 08:12:10 +000015439 else if (tg3_flag(tp, 57765_PLUS))
15440 tg3_flag_set(tp, HW_TSO_3);
15441 else if (tg3_flag(tp, 5755_PLUS) ||
Joe Perches41535772013-02-16 11:20:04 +000015442 tg3_asic_rev(tp) == ASIC_REV_5906)
Joe Perches63c3a662011-04-26 08:12:10 +000015443 tg3_flag_set(tp, HW_TSO_2);
15444 else if (tg3_flag(tp, 5750_PLUS)) {
15445 tg3_flag_set(tp, HW_TSO_1);
15446 tg3_flag_set(tp, TSO_BUG);
Joe Perches41535772013-02-16 11:20:04 +000015447 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15448 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
Joe Perches63c3a662011-04-26 08:12:10 +000015449 tg3_flag_clear(tp, TSO_BUG);
Joe Perches41535772013-02-16 11:20:04 +000015450 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15451 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15452 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
Matt Carlson1caf13e2013-03-06 17:02:29 +000015453 tg3_flag_set(tp, FW_TSO);
15454 tg3_flag_set(tp, TSO_BUG);
Joe Perches41535772013-02-16 11:20:04 +000015455 if (tg3_asic_rev(tp) == ASIC_REV_5705)
Matt Carlson507399f2009-11-13 13:03:37 +000015456 tp->fw_needed = FIRMWARE_TG3TSO5;
15457 else
15458 tp->fw_needed = FIRMWARE_TG3TSO;
15459 }
15460
Matt Carlsondabc5c62011-05-19 12:12:52 +000015461 /* Selectively allow TSO based on operating conditions */
Matt Carlson6ff6f812011-05-19 12:12:54 +000015462 if (tg3_flag(tp, HW_TSO_1) ||
15463 tg3_flag(tp, HW_TSO_2) ||
15464 tg3_flag(tp, HW_TSO_3) ||
Matt Carlson1caf13e2013-03-06 17:02:29 +000015465 tg3_flag(tp, FW_TSO)) {
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000015466 /* For firmware TSO, assume ASF is disabled.
15467 * We'll disable TSO later if we discover ASF
15468 * is enabled in tg3_get_eeprom_hw_cfg().
15469 */
Matt Carlsondabc5c62011-05-19 12:12:52 +000015470 tg3_flag_set(tp, TSO_CAPABLE);
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000015471 } else {
Matt Carlsondabc5c62011-05-19 12:12:52 +000015472 tg3_flag_clear(tp, TSO_CAPABLE);
15473 tg3_flag_clear(tp, TSO_BUG);
15474 tp->fw_needed = NULL;
15475 }
15476
Joe Perches41535772013-02-16 11:20:04 +000015477 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
Matt Carlsondabc5c62011-05-19 12:12:52 +000015478 tp->fw_needed = FIRMWARE_TG3;
15479
Nithin Sujirc4dab502013-03-06 17:02:34 +000015480 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15481 tp->fw_needed = FIRMWARE_TG357766;
15482
Matt Carlson507399f2009-11-13 13:03:37 +000015483 tp->irq_max = 1;
15484
Joe Perches63c3a662011-04-26 08:12:10 +000015485 if (tg3_flag(tp, 5750_PLUS)) {
15486 tg3_flag_set(tp, SUPPORT_MSI);
Joe Perches41535772013-02-16 11:20:04 +000015487 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15488 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15489 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15490 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
Michael Chan7544b092007-05-05 13:08:32 -070015491 tp->pdev_peer == tp->pdev))
Joe Perches63c3a662011-04-26 08:12:10 +000015492 tg3_flag_clear(tp, SUPPORT_MSI);
Michael Chan7544b092007-05-05 13:08:32 -070015493
Joe Perches63c3a662011-04-26 08:12:10 +000015494 if (tg3_flag(tp, 5755_PLUS) ||
Joe Perches41535772013-02-16 11:20:04 +000015495 tg3_asic_rev(tp) == ASIC_REV_5906) {
Joe Perches63c3a662011-04-26 08:12:10 +000015496 tg3_flag_set(tp, 1SHOT_MSI);
Michael Chan52c0fd82006-06-29 20:15:54 -070015497 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015498
Joe Perches63c3a662011-04-26 08:12:10 +000015499 if (tg3_flag(tp, 57765_PLUS)) {
15500 tg3_flag_set(tp, SUPPORT_MSIX);
Matt Carlson507399f2009-11-13 13:03:37 +000015501 tp->irq_max = TG3_IRQ_MAX_VECS;
15502 }
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000015503 }
Matt Carlson0e1406d2009-11-02 12:33:33 +000015504
Michael Chan91024262012-09-28 07:12:38 +000015505 tp->txq_max = 1;
15506 tp->rxq_max = 1;
15507 if (tp->irq_max > 1) {
15508 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15509 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15510
Joe Perches41535772013-02-16 11:20:04 +000015511 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15512 tg3_asic_rev(tp) == ASIC_REV_5720)
Michael Chan91024262012-09-28 07:12:38 +000015513 tp->txq_max = tp->irq_max - 1;
15514 }
15515
Matt Carlsonb7abee62012-06-07 12:56:54 +000015516 if (tg3_flag(tp, 5755_PLUS) ||
Joe Perches41535772013-02-16 11:20:04 +000015517 tg3_asic_rev(tp) == ASIC_REV_5906)
Joe Perches63c3a662011-04-26 08:12:10 +000015518 tg3_flag_set(tp, SHORT_DMA_BUG);
Matt Carlsonf6eb9b12009-09-01 13:19:53 +000015519
Joe Perches41535772013-02-16 11:20:04 +000015520 if (tg3_asic_rev(tp) == ASIC_REV_5719)
Matt Carlsona4cb4282011-12-14 11:09:58 +000015521 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
Matt Carlsone31aa982011-07-27 14:20:53 +000015522
Joe Perches41535772013-02-16 11:20:04 +000015523 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15524 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15525 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15526 tg3_asic_rev(tp) == ASIC_REV_5762)
Joe Perches63c3a662011-04-26 08:12:10 +000015527 tg3_flag_set(tp, LRG_PROD_RING_CAP);
Matt Carlsonde9f5232011-04-05 14:22:43 +000015528
Joe Perches63c3a662011-04-26 08:12:10 +000015529 if (tg3_flag(tp, 57765_PLUS) &&
Joe Perches41535772013-02-16 11:20:04 +000015530 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
Joe Perches63c3a662011-04-26 08:12:10 +000015531 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
Matt Carlsonb703df62009-12-03 08:36:21 +000015532
Joe Perches63c3a662011-04-26 08:12:10 +000015533 if (!tg3_flag(tp, 5705_PLUS) ||
15534 tg3_flag(tp, 5780_CLASS) ||
15535 tg3_flag(tp, USE_JUMBO_BDFLAG))
15536 tg3_flag_set(tp, JUMBO_CAPABLE);
Michael Chan0f893dc2005-07-25 12:30:38 -070015537
Matt Carlson52f44902008-11-21 17:17:04 -080015538 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15539 &pci_state_reg);
15540
Jon Mason708ebb3a2011-06-27 12:56:50 +000015541 if (pci_is_pcie(tp->pdev)) {
Matt Carlson5e7dfd02008-11-21 17:18:16 -080015542 u16 lnkctl;
15543
Joe Perches63c3a662011-04-26 08:12:10 +000015544 tg3_flag_set(tp, PCI_EXPRESS);
Matt Carlson5f5c51e2007-11-12 21:19:37 -080015545
Jiang Liu0f49bfb2012-08-20 13:28:20 -060015546 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
Matt Carlson5e7dfd02008-11-21 17:18:16 -080015547 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
Joe Perches41535772013-02-16 11:20:04 +000015548 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
Joe Perches63c3a662011-04-26 08:12:10 +000015549 tg3_flag_clear(tp, HW_TSO_2);
Matt Carlsondabc5c62011-05-19 12:12:52 +000015550 tg3_flag_clear(tp, TSO_CAPABLE);
Matt Carlson7196cd62011-05-19 16:02:44 +000015551 }
Joe Perches41535772013-02-16 11:20:04 +000015552 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15553 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15554 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15555 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
Joe Perches63c3a662011-04-26 08:12:10 +000015556 tg3_flag_set(tp, CLKREQ_BUG);
Joe Perches41535772013-02-16 11:20:04 +000015557 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
Joe Perches63c3a662011-04-26 08:12:10 +000015558 tg3_flag_set(tp, L1PLLPD_EN);
Michael Chanc7835a72006-11-15 21:14:42 -080015559 }
Joe Perches41535772013-02-16 11:20:04 +000015560 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
Jon Mason708ebb3a2011-06-27 12:56:50 +000015561 /* BCM5785 devices are effectively PCIe devices, and should
15562 * follow PCIe codepaths, but do not have a PCIe capabilities
15563 * section.
Matt Carlson93a700a2011-08-31 11:44:54 +000015564 */
Joe Perches63c3a662011-04-26 08:12:10 +000015565 tg3_flag_set(tp, PCI_EXPRESS);
15566 } else if (!tg3_flag(tp, 5705_PLUS) ||
15567 tg3_flag(tp, 5780_CLASS)) {
Matt Carlson52f44902008-11-21 17:17:04 -080015568 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15569 if (!tp->pcix_cap) {
Matt Carlson2445e462010-04-05 10:19:21 +000015570 dev_err(&tp->pdev->dev,
15571 "Cannot find PCI-X capability, aborting\n");
Matt Carlson52f44902008-11-21 17:17:04 -080015572 return -EIO;
15573 }
15574
15575 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
Joe Perches63c3a662011-04-26 08:12:10 +000015576 tg3_flag_set(tp, PCIX_MODE);
Matt Carlson52f44902008-11-21 17:17:04 -080015577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015578
Michael Chan399de502005-10-03 14:02:39 -070015579 /* If we have an AMD 762 or VIA K8T800 chipset, write
15580 * reordering to the mailbox registers done by the host
15581 * controller can cause major troubles. We read back from
15582 * every mailbox register write to force the writes to be
15583 * posted to the chip in order.
15584 */
Matt Carlson41434702011-03-09 16:58:22 +000015585 if (pci_dev_present(tg3_write_reorder_chipsets) &&
Joe Perches63c3a662011-04-26 08:12:10 +000015586 !tg3_flag(tp, PCI_EXPRESS))
15587 tg3_flag_set(tp, MBOX_WRITE_REORDER);
Michael Chan399de502005-10-03 14:02:39 -070015588
Matt Carlson69fc4052008-12-21 20:19:57 -080015589 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15590 &tp->pci_cacheline_sz);
15591 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15592 &tp->pci_lat_timer);
Joe Perches41535772013-02-16 11:20:04 +000015593 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070015594 tp->pci_lat_timer < 64) {
15595 tp->pci_lat_timer = 64;
Matt Carlson69fc4052008-12-21 20:19:57 -080015596 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15597 tp->pci_lat_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015598 }
15599
Matt Carlson16821282011-07-13 09:27:28 +000015600 /* Important! -- It is critical that the PCI-X hw workaround
15601 * situation is decided before the first MMIO register access.
15602 */
Joe Perches41535772013-02-16 11:20:04 +000015603 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
Matt Carlson52f44902008-11-21 17:17:04 -080015604 /* 5700 BX chips need to have their TX producer index
15605 * mailboxes written twice to workaround a bug.
15606 */
Joe Perches63c3a662011-04-26 08:12:10 +000015607 tg3_flag_set(tp, TXD_MBOX_HWBUG);
Matt Carlson9974a352007-10-07 23:27:28 -070015608
Matt Carlson52f44902008-11-21 17:17:04 -080015609 /* If we are in PCI-X mode, enable register write workaround.
Linus Torvalds1da177e2005-04-16 15:20:36 -070015610 *
15611 * The workaround is to use indirect register accesses
15612 * for all chip writes not to mailbox registers.
15613 */
Joe Perches63c3a662011-04-26 08:12:10 +000015614 if (tg3_flag(tp, PCIX_MODE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070015615 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015616
Joe Perches63c3a662011-04-26 08:12:10 +000015617 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015618
15619 /* The chip can have it's power management PCI config
15620 * space registers clobbered due to this bug.
15621 * So explicitly force the chip into D0 here.
15622 */
Matt Carlson9974a352007-10-07 23:27:28 -070015623 pci_read_config_dword(tp->pdev,
15624 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070015625 &pm_reg);
15626 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15627 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070015628 pci_write_config_dword(tp->pdev,
15629 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070015630 pm_reg);
15631
15632 /* Also, force SERR#/PERR# in PCI command. */
15633 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15634 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15635 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15636 }
15637 }
15638
Linus Torvalds1da177e2005-04-16 15:20:36 -070015639 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
Joe Perches63c3a662011-04-26 08:12:10 +000015640 tg3_flag_set(tp, PCI_HIGH_SPEED);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015641 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
Joe Perches63c3a662011-04-26 08:12:10 +000015642 tg3_flag_set(tp, PCI_32BIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015643
15644 /* Chip-specific fixup from Broadcom driver */
Joe Perches41535772013-02-16 11:20:04 +000015645 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070015646 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15647 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15648 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15649 }
15650
Michael Chan1ee582d2005-08-09 20:16:46 -070015651 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070015652 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070015653 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070015654 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070015655 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070015656 tp->write32_tx_mbox = tg3_write32;
15657 tp->write32_rx_mbox = tg3_write32;
15658
15659 /* Various workaround register access methods */
Joe Perches63c3a662011-04-26 08:12:10 +000015660 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
Michael Chan1ee582d2005-08-09 20:16:46 -070015661 tp->write32 = tg3_write_indirect_reg32;
Joe Perches41535772013-02-16 11:20:04 +000015662 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
Joe Perches63c3a662011-04-26 08:12:10 +000015663 (tg3_flag(tp, PCI_EXPRESS) &&
Joe Perches41535772013-02-16 11:20:04 +000015664 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
Matt Carlson98efd8a2007-05-05 12:47:25 -070015665 /*
15666 * Back to back register writes can cause problems on these
15667 * chips, the workaround is to read back all reg writes
15668 * except those to mailbox regs.
15669 *
15670 * See tg3_write_indirect_reg32().
15671 */
Michael Chan1ee582d2005-08-09 20:16:46 -070015672 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070015673 }
15674
Joe Perches63c3a662011-04-26 08:12:10 +000015675 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
Michael Chan1ee582d2005-08-09 20:16:46 -070015676 tp->write32_tx_mbox = tg3_write32_tx_mbox;
Joe Perches63c3a662011-04-26 08:12:10 +000015677 if (tg3_flag(tp, MBOX_WRITE_REORDER))
Michael Chan1ee582d2005-08-09 20:16:46 -070015678 tp->write32_rx_mbox = tg3_write_flush_reg32;
15679 }
Michael Chan20094932005-08-09 20:16:32 -070015680
Joe Perches63c3a662011-04-26 08:12:10 +000015681 if (tg3_flag(tp, ICH_WORKAROUND)) {
Michael Chan68929142005-08-09 20:17:14 -070015682 tp->read32 = tg3_read_indirect_reg32;
15683 tp->write32 = tg3_write_indirect_reg32;
15684 tp->read32_mbox = tg3_read_indirect_mbox;
15685 tp->write32_mbox = tg3_write_indirect_mbox;
15686 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15687 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15688
15689 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070015690 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070015691
15692 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15693 pci_cmd &= ~PCI_COMMAND_MEMORY;
15694 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15695 }
Joe Perches41535772013-02-16 11:20:04 +000015696 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
Michael Chanb5d37722006-09-27 16:06:21 -070015697 tp->read32_mbox = tg3_read32_mbox_5906;
15698 tp->write32_mbox = tg3_write32_mbox_5906;
15699 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15700 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15701 }
Michael Chan68929142005-08-09 20:17:14 -070015702
Michael Chanbbadf502006-04-06 21:46:34 -070015703 if (tp->write32 == tg3_write_indirect_reg32 ||
Joe Perches63c3a662011-04-26 08:12:10 +000015704 (tg3_flag(tp, PCIX_MODE) &&
Joe Perches41535772013-02-16 11:20:04 +000015705 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15706 tg3_asic_rev(tp) == ASIC_REV_5701)))
Joe Perches63c3a662011-04-26 08:12:10 +000015707 tg3_flag_set(tp, SRAM_USE_CONFIG);
Michael Chanbbadf502006-04-06 21:46:34 -070015708
Matt Carlson16821282011-07-13 09:27:28 +000015709 /* The memory arbiter has to be enabled in order for SRAM accesses
15710 * to succeed. Normally on powerup the tg3 chip firmware will make
15711 * sure it is enabled, but other entities such as system netboot
15712 * code might disable it.
15713 */
15714 val = tr32(MEMARB_MODE);
15715 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15716
Matt Carlson9dc5e342011-11-04 09:15:02 +000015717 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
Joe Perches41535772013-02-16 11:20:04 +000015718 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
Matt Carlson9dc5e342011-11-04 09:15:02 +000015719 tg3_flag(tp, 5780_CLASS)) {
15720 if (tg3_flag(tp, PCIX_MODE)) {
15721 pci_read_config_dword(tp->pdev,
15722 tp->pcix_cap + PCI_X_STATUS,
15723 &val);
15724 tp->pci_fn = val & 0x7;
15725 }
Joe Perches41535772013-02-16 11:20:04 +000015726 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15727 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15728 tg3_asic_rev(tp) == ASIC_REV_5720) {
Matt Carlson9dc5e342011-11-04 09:15:02 +000015729 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
Michael Chan857001f2013-01-06 12:51:09 +000015730 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15731 val = tr32(TG3_CPMU_STATUS);
15732
Joe Perches41535772013-02-16 11:20:04 +000015733 if (tg3_asic_rev(tp) == ASIC_REV_5717)
Michael Chan857001f2013-01-06 12:51:09 +000015734 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15735 else
Matt Carlson9dc5e342011-11-04 09:15:02 +000015736 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15737 TG3_CPMU_STATUS_FSHFT_5719;
Matt Carlson69f11c92011-07-13 09:27:30 +000015738 }
15739
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +000015740 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15741 tp->write32_tx_mbox = tg3_write_flush_reg32;
15742 tp->write32_rx_mbox = tg3_write_flush_reg32;
15743 }
15744
Michael Chan7d0c41e2005-04-21 17:06:20 -070015745 /* Get eeprom hw config before calling tg3_set_power_state().
Joe Perches63c3a662011-04-26 08:12:10 +000015746 * In particular, the TG3_FLAG_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070015747 * determined before calling tg3_set_power_state() so that
15748 * we know whether or not to switch out of Vaux power.
15749 * When the flag is set, it means that GPIO1 is used for eeprom
15750 * write protect and also implies that it is a LOM where GPIOs
15751 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040015752 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070015753 tg3_get_eeprom_hw_cfg(tp);
15754
Matt Carlson1caf13e2013-03-06 17:02:29 +000015755 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
Matt Carlsoncf9ecf42011-11-28 09:41:03 +000015756 tg3_flag_clear(tp, TSO_CAPABLE);
15757 tg3_flag_clear(tp, TSO_BUG);
15758 tp->fw_needed = NULL;
15759 }
15760
Joe Perches63c3a662011-04-26 08:12:10 +000015761 if (tg3_flag(tp, ENABLE_APE)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070015762 /* Allow reads and writes to the
15763 * APE register and memory space.
15764 */
15765 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
Matt Carlsonf92d9dc12010-06-05 17:24:30 +000015766 PCISTATE_ALLOW_APE_SHMEM_WR |
15767 PCISTATE_ALLOW_APE_PSPACE_WR;
Matt Carlson0d3031d2007-10-10 18:02:43 -070015768 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15769 pci_state_reg);
Matt Carlsonc9cab242011-07-13 09:27:27 +000015770
15771 tg3_ape_lock_init(tp);
Matt Carlson0d3031d2007-10-10 18:02:43 -070015772 }
15773
Matt Carlson16821282011-07-13 09:27:28 +000015774 /* Set up tp->grc_local_ctrl before calling
15775 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15776 * will bring 5700's external PHY out of reset.
Michael Chan314fba32005-04-21 17:07:04 -070015777 * It is also used as eeprom write protect on LOMs.
15778 */
15779 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
Joe Perches41535772013-02-16 11:20:04 +000015780 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
Joe Perches63c3a662011-04-26 08:12:10 +000015781 tg3_flag(tp, EEPROM_WRITE_PROT))
Michael Chan314fba32005-04-21 17:07:04 -070015782 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15783 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070015784 /* Unused GPIO3 must be driven as output on 5752 because there
15785 * are no pull-up resistors on unused GPIO pins.
15786 */
Joe Perches41535772013-02-16 11:20:04 +000015787 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
Michael Chan3e7d83b2005-04-21 17:10:36 -070015788 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070015789
Joe Perches41535772013-02-16 11:20:04 +000015790 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15791 tg3_asic_rev(tp) == ASIC_REV_57780 ||
Matt Carlson55086ad2011-12-14 11:09:59 +000015792 tg3_flag(tp, 57765_CLASS))
Michael Chanaf36e6b2006-03-23 01:28:06 -080015793 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15794
Matt Carlson8d519ab2009-04-20 06:58:01 +000015795 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15796 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
Matt Carlson5f0c4a32008-06-09 15:41:12 -070015797 /* Turn off the debug UART. */
15798 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
Joe Perches63c3a662011-04-26 08:12:10 +000015799 if (tg3_flag(tp, IS_NIC))
Matt Carlson5f0c4a32008-06-09 15:41:12 -070015800 /* Keep VMain power. */
15801 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15802 GRC_LCLCTRL_GPIO_OUTPUT0;
15803 }
15804
Joe Perches41535772013-02-16 11:20:04 +000015805 if (tg3_asic_rev(tp) == ASIC_REV_5762)
Michael Chanc86a8562013-01-06 12:51:08 +000015806 tp->grc_local_ctrl |=
15807 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15808
Matt Carlson16821282011-07-13 09:27:28 +000015809 /* Switch out of Vaux if it is a NIC */
15810 tg3_pwrsrc_switch_to_vmain(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015811
Linus Torvalds1da177e2005-04-16 15:20:36 -070015812 /* Derive initial jumbo mode from MTU assigned in
15813 * ether_setup() via the alloc_etherdev() call
15814 */
Joe Perches63c3a662011-04-26 08:12:10 +000015815 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15816 tg3_flag_set(tp, JUMBO_RING_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015817
15818 /* Determine WakeOnLan speed to use. */
Joe Perches41535772013-02-16 11:20:04 +000015819 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15820 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15821 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15822 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
Joe Perches63c3a662011-04-26 08:12:10 +000015823 tg3_flag_clear(tp, WOL_SPEED_100MB);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015824 } else {
Joe Perches63c3a662011-04-26 08:12:10 +000015825 tg3_flag_set(tp, WOL_SPEED_100MB);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015826 }
15827
Joe Perches41535772013-02-16 11:20:04 +000015828 if (tg3_asic_rev(tp) == ASIC_REV_5906)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015829 tp->phy_flags |= TG3_PHYFLG_IS_FET;
Matt Carlson7f97a4b2009-08-25 10:10:03 +000015830
Linus Torvalds1da177e2005-04-16 15:20:36 -070015831 /* A few boards don't want Ethernet@WireSpeed phy feature */
Joe Perches41535772013-02-16 11:20:04 +000015832 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15833 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15834 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15835 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015836 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15837 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15838 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015839
Joe Perches41535772013-02-16 11:20:04 +000015840 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15841 tg3_chip_rev(tp) == CHIPREV_5704_AX)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015842 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
Joe Perches41535772013-02-16 11:20:04 +000015843 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015844 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015845
Joe Perches63c3a662011-04-26 08:12:10 +000015846 if (tg3_flag(tp, 5705_PLUS) &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015847 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
Joe Perches41535772013-02-16 11:20:04 +000015848 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15849 tg3_asic_rev(tp) != ASIC_REV_57780 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015850 !tg3_flag(tp, 57765_PLUS)) {
Joe Perches41535772013-02-16 11:20:04 +000015851 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15852 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15853 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15854 tg3_asic_rev(tp) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080015855 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15856 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015857 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080015858 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015859 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
Matt Carlson321d32a2008-11-21 17:22:19 -080015860 } else
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015861 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
Michael Chanc424cb22006-04-29 18:56:34 -070015862 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070015863
Joe Perches41535772013-02-16 11:20:04 +000015864 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15865 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
Matt Carlsonb2a5c192008-04-03 21:44:44 -070015866 tp->phy_otp = tg3_read_otp_phycfg(tp);
15867 if (tp->phy_otp == 0)
15868 tp->phy_otp = TG3_OTP_DEFAULT;
15869 }
15870
Joe Perches63c3a662011-04-26 08:12:10 +000015871 if (tg3_flag(tp, CPMU_PRESENT))
Matt Carlson8ef21422008-05-02 16:47:53 -070015872 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15873 else
15874 tp->mi_mode = MAC_MI_MODE_BASE;
15875
Linus Torvalds1da177e2005-04-16 15:20:36 -070015876 tp->coalesce_mode = 0;
Joe Perches41535772013-02-16 11:20:04 +000015877 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15878 tg3_chip_rev(tp) != CHIPREV_5700_BX)
Linus Torvalds1da177e2005-04-16 15:20:36 -070015879 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15880
Matt Carlson4d958472011-04-20 07:57:35 +000015881 /* Set these bits to enable statistics workaround. */
Joe Perches41535772013-02-16 11:20:04 +000015882 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15883 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15884 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
Matt Carlson4d958472011-04-20 07:57:35 +000015885 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15886 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15887 }
15888
Joe Perches41535772013-02-16 11:20:04 +000015889 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15890 tg3_asic_rev(tp) == ASIC_REV_57780)
Joe Perches63c3a662011-04-26 08:12:10 +000015891 tg3_flag_set(tp, USE_PHYLIB);
Matt Carlson57e69832008-05-25 23:48:31 -070015892
Matt Carlson158d7ab2008-05-29 01:37:54 -070015893 err = tg3_mdio_init(tp);
15894 if (err)
15895 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015896
15897 /* Initialize data/descriptor byte/word swapping. */
15898 val = tr32(GRC_MODE);
Joe Perches41535772013-02-16 11:20:04 +000015899 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15900 tg3_asic_rev(tp) == ASIC_REV_5762)
Matt Carlsonf2096f92011-04-05 14:22:48 +000015901 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15902 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15903 GRC_MODE_B2HRX_ENABLE |
15904 GRC_MODE_HTX2B_ENABLE |
15905 GRC_MODE_HOST_STACKUP);
15906 else
15907 val &= GRC_MODE_HOST_STACKUP;
15908
Linus Torvalds1da177e2005-04-16 15:20:36 -070015909 tw32(GRC_MODE, val | tp->grc_mode);
15910
15911 tg3_switch_clocks(tp);
15912
15913 /* Clear this out for sanity. */
15914 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15915
15916 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15917 &pci_state_reg);
15918 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
Joe Perches63c3a662011-04-26 08:12:10 +000015919 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
Joe Perches41535772013-02-16 11:20:04 +000015920 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15921 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15922 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15923 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070015924 void __iomem *sram_base;
15925
15926 /* Write some dummy words into the SRAM status block
15927 * area, see if it reads back correctly. If the return
15928 * value is bad, force enable the PCIX workaround.
15929 */
15930 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15931
15932 writel(0x00000000, sram_base);
15933 writel(0x00000000, sram_base + 4);
15934 writel(0xffffffff, sram_base + 4);
15935 if (readl(sram_base) != 0x00000000)
Joe Perches63c3a662011-04-26 08:12:10 +000015936 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015937 }
15938 }
15939
15940 udelay(50);
15941 tg3_nvram_init(tp);
15942
Nithin Sujirc4dab502013-03-06 17:02:34 +000015943 /* If the device has an NVRAM, no need to load patch firmware */
15944 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15945 !tg3_flag(tp, NO_NVRAM))
15946 tp->fw_needed = NULL;
15947
Linus Torvalds1da177e2005-04-16 15:20:36 -070015948 grc_misc_cfg = tr32(GRC_MISC_CFG);
15949 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15950
Joe Perches41535772013-02-16 11:20:04 +000015951 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070015952 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15953 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
Joe Perches63c3a662011-04-26 08:12:10 +000015954 tg3_flag_set(tp, IS_5788);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015955
Joe Perches63c3a662011-04-26 08:12:10 +000015956 if (!tg3_flag(tp, IS_5788) &&
Joe Perches41535772013-02-16 11:20:04 +000015957 tg3_asic_rev(tp) != ASIC_REV_5700)
Joe Perches63c3a662011-04-26 08:12:10 +000015958 tg3_flag_set(tp, TAGGED_STATUS);
15959 if (tg3_flag(tp, TAGGED_STATUS)) {
David S. Millerfac9b832005-05-18 22:46:34 -070015960 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15961 HOSTCC_MODE_CLRTICK_TXBD);
15962
15963 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15964 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15965 tp->misc_host_ctrl);
15966 }
15967
Matt Carlson3bda1252008-08-15 14:08:22 -070015968 /* Preserve the APE MAC_MODE bits */
Joe Perches63c3a662011-04-26 08:12:10 +000015969 if (tg3_flag(tp, ENABLE_APE))
Matt Carlsond2394e6b2010-11-24 08:31:47 +000015970 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
Matt Carlson3bda1252008-08-15 14:08:22 -070015971 else
Matt Carlson6e01b202011-08-19 13:58:20 +000015972 tp->mac_mode = 0;
Matt Carlson3bda1252008-08-15 14:08:22 -070015973
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000015974 if (tg3_10_100_only_device(tp, ent))
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015975 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015976
15977 err = tg3_phy_probe(tp);
15978 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000015979 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015980 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070015981 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015982 }
15983
Matt Carlson184b8902010-04-05 10:19:25 +000015984 tg3_read_vpd(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080015985 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070015986
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015987 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15988 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015989 } else {
Joe Perches41535772013-02-16 11:20:04 +000015990 if (tg3_asic_rev(tp) == ASIC_REV_5700)
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015991 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015992 else
Matt Carlsonf07e9af2010-08-02 11:26:07 +000015993 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015994 }
15995
15996 /* 5700 {AX,BX} chips have a broken status block link
15997 * change bit implementation, so we must use the
15998 * status register in those cases.
15999 */
Joe Perches41535772013-02-16 11:20:04 +000016000 if (tg3_asic_rev(tp) == ASIC_REV_5700)
Joe Perches63c3a662011-04-26 08:12:10 +000016001 tg3_flag_set(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016002 else
Joe Perches63c3a662011-04-26 08:12:10 +000016003 tg3_flag_clear(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016004
16005 /* The led_ctrl is set during tg3_phy_probe, here we might
16006 * have to force the link status polling mechanism based
16007 * upon subsystem IDs.
16008 */
16009 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Joe Perches41535772013-02-16 11:20:04 +000016010 tg3_asic_rev(tp) == ASIC_REV_5701 &&
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016011 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16012 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
Joe Perches63c3a662011-04-26 08:12:10 +000016013 tg3_flag_set(tp, USE_LINKCHG_REG);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016014 }
16015
16016 /* For all SERDES we poll the MAC status register. */
Matt Carlsonf07e9af2010-08-02 11:26:07 +000016017 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
Joe Perches63c3a662011-04-26 08:12:10 +000016018 tg3_flag_set(tp, POLL_SERDES);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016019 else
Joe Perches63c3a662011-04-26 08:12:10 +000016020 tg3_flag_clear(tp, POLL_SERDES);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016021
Eric Dumazet9205fd92011-11-18 06:47:01 +000016022 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
Matt Carlsond2757fc2010-04-12 06:58:27 +000016023 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
Joe Perches41535772013-02-16 11:20:04 +000016024 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
Joe Perches63c3a662011-04-26 08:12:10 +000016025 tg3_flag(tp, PCIX_MODE)) {
Eric Dumazet9205fd92011-11-18 06:47:01 +000016026 tp->rx_offset = NET_SKB_PAD;
Matt Carlsond2757fc2010-04-12 06:58:27 +000016027#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
Matt Carlson9dc7a112010-04-12 06:58:28 +000016028 tp->rx_copy_thresh = ~(u16)0;
Matt Carlsond2757fc2010-04-12 06:58:27 +000016029#endif
16030 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016031
Matt Carlson2c49a442010-09-30 10:34:35 +000016032 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16033 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
Matt Carlson7cb32cf2010-09-30 10:34:36 +000016034 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16035
Matt Carlson2c49a442010-09-30 10:34:35 +000016036 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
Michael Chanf92905d2006-06-29 20:14:29 -070016037
16038 /* Increment the rx prod index on the rx std ring by at most
16039 * 8 for these chips to workaround hw errata.
16040 */
Joe Perches41535772013-02-16 11:20:04 +000016041 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16042 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16043 tg3_asic_rev(tp) == ASIC_REV_5755)
Michael Chanf92905d2006-06-29 20:14:29 -070016044 tp->rx_std_max_post = 8;
16045
Joe Perches63c3a662011-04-26 08:12:10 +000016046 if (tg3_flag(tp, ASPM_WORKAROUND))
Matt Carlson8ed5d972007-05-07 00:25:49 -070016047 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16048 PCIE_PWR_MGMT_L1_THRESH_MSK;
16049
Linus Torvalds1da177e2005-04-16 15:20:36 -070016050 return err;
16051}
16052
David S. Miller49b6e95f2007-03-29 01:38:42 -070016053#ifdef CONFIG_SPARC
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016054static int tg3_get_macaddr_sparc(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016055{
16056 struct net_device *dev = tp->dev;
16057 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070016058 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070016059 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070016060 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016061
David S. Miller49b6e95f2007-03-29 01:38:42 -070016062 addr = of_get_property(dp, "local-mac-address", &len);
16063 if (addr && len == 6) {
16064 memcpy(dev->dev_addr, addr, 6);
David S. Miller49b6e95f2007-03-29 01:38:42 -070016065 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016066 }
16067 return -ENODEV;
16068}
16069
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016070static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016071{
16072 struct net_device *dev = tp->dev;
16073
16074 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16075 return 0;
16076}
16077#endif
16078
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016079static int tg3_get_device_address(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016080{
16081 struct net_device *dev = tp->dev;
16082 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080016083 int addr_ok = 0;
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +000016084 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016085
David S. Miller49b6e95f2007-03-29 01:38:42 -070016086#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070016087 if (!tg3_get_macaddr_sparc(tp))
16088 return 0;
16089#endif
16090
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +000016091 if (tg3_flag(tp, IS_SSB_CORE)) {
16092 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16093 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16094 return 0;
16095 }
16096
Linus Torvalds1da177e2005-04-16 15:20:36 -070016097 mac_offset = 0x7c;
Joe Perches41535772013-02-16 11:20:04 +000016098 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
Joe Perches63c3a662011-04-26 08:12:10 +000016099 tg3_flag(tp, 5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070016100 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16101 mac_offset = 0xcc;
16102 if (tg3_nvram_lock(tp))
16103 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16104 else
16105 tg3_nvram_unlock(tp);
Joe Perches63c3a662011-04-26 08:12:10 +000016106 } else if (tg3_flag(tp, 5717_PLUS)) {
Matt Carlson69f11c92011-07-13 09:27:30 +000016107 if (tp->pci_fn & 1)
Matt Carlsona1b950d2009-09-01 13:20:17 +000016108 mac_offset = 0xcc;
Matt Carlson69f11c92011-07-13 09:27:30 +000016109 if (tp->pci_fn > 1)
Matt Carlsona50d0792010-06-05 17:24:37 +000016110 mac_offset += 0x18c;
Joe Perches41535772013-02-16 11:20:04 +000016111 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
Michael Chanb5d37722006-09-27 16:06:21 -070016112 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016113
16114 /* First try to get it from MAC address mailbox. */
16115 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16116 if ((hi >> 16) == 0x484b) {
16117 dev->dev_addr[0] = (hi >> 8) & 0xff;
16118 dev->dev_addr[1] = (hi >> 0) & 0xff;
16119
16120 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16121 dev->dev_addr[2] = (lo >> 24) & 0xff;
16122 dev->dev_addr[3] = (lo >> 16) & 0xff;
16123 dev->dev_addr[4] = (lo >> 8) & 0xff;
16124 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016125
Michael Chan008652b2006-03-27 23:14:53 -080016126 /* Some old bootcode may report a 0 MAC address in SRAM */
16127 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16128 }
16129 if (!addr_ok) {
16130 /* Next, try NVRAM. */
Joe Perches63c3a662011-04-26 08:12:10 +000016131 if (!tg3_flag(tp, NO_NVRAM) &&
Matt Carlsondf259d82009-04-20 06:57:14 +000016132 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
Matt Carlson6d348f22009-02-25 14:25:52 +000016133 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
Matt Carlson62cedd12009-04-20 14:52:29 -070016134 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16135 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
Michael Chan008652b2006-03-27 23:14:53 -080016136 }
16137 /* Finally just fetch it out of the MAC control regs. */
16138 else {
16139 hi = tr32(MAC_ADDR_0_HIGH);
16140 lo = tr32(MAC_ADDR_0_LOW);
16141
16142 dev->dev_addr[5] = lo & 0xff;
16143 dev->dev_addr[4] = (lo >> 8) & 0xff;
16144 dev->dev_addr[3] = (lo >> 16) & 0xff;
16145 dev->dev_addr[2] = (lo >> 24) & 0xff;
16146 dev->dev_addr[1] = hi & 0xff;
16147 dev->dev_addr[0] = (hi >> 8) & 0xff;
16148 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016149 }
16150
16151 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070016152#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070016153 if (!tg3_get_default_macaddr_sparc(tp))
16154 return 0;
16155#endif
16156 return -EINVAL;
16157 }
16158 return 0;
16159}
16160
David S. Miller59e6b432005-05-18 22:50:10 -070016161#define BOUNDARY_SINGLE_CACHELINE 1
16162#define BOUNDARY_MULTI_CACHELINE 2
16163
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016164static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
David S. Miller59e6b432005-05-18 22:50:10 -070016165{
16166 int cacheline_size;
16167 u8 byte;
16168 int goal;
16169
16170 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16171 if (byte == 0)
16172 cacheline_size = 1024;
16173 else
16174 cacheline_size = (int) byte * 4;
16175
16176 /* On 5703 and later chips, the boundary bits have no
16177 * effect.
16178 */
Joe Perches41535772013-02-16 11:20:04 +000016179 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16180 tg3_asic_rev(tp) != ASIC_REV_5701 &&
Joe Perches63c3a662011-04-26 08:12:10 +000016181 !tg3_flag(tp, PCI_EXPRESS))
David S. Miller59e6b432005-05-18 22:50:10 -070016182 goto out;
16183
16184#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16185 goal = BOUNDARY_MULTI_CACHELINE;
16186#else
16187#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16188 goal = BOUNDARY_SINGLE_CACHELINE;
16189#else
16190 goal = 0;
16191#endif
16192#endif
16193
Joe Perches63c3a662011-04-26 08:12:10 +000016194 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlsoncbf9ca62009-11-13 13:03:40 +000016195 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16196 goto out;
16197 }
16198
David S. Miller59e6b432005-05-18 22:50:10 -070016199 if (!goal)
16200 goto out;
16201
16202 /* PCI controllers on most RISC systems tend to disconnect
16203 * when a device tries to burst across a cache-line boundary.
16204 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16205 *
16206 * Unfortunately, for PCI-E there are only limited
16207 * write-side controls for this, and thus for reads
16208 * we will still get the disconnects. We'll also waste
16209 * these PCI cycles for both read and write for chips
16210 * other than 5700 and 5701 which do not implement the
16211 * boundary bits.
16212 */
Joe Perches63c3a662011-04-26 08:12:10 +000016213 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
David S. Miller59e6b432005-05-18 22:50:10 -070016214 switch (cacheline_size) {
16215 case 16:
16216 case 32:
16217 case 64:
16218 case 128:
16219 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16220 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16221 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16222 } else {
16223 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16224 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16225 }
16226 break;
16227
16228 case 256:
16229 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16230 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16231 break;
16232
16233 default:
16234 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16235 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16236 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070016237 }
Joe Perches63c3a662011-04-26 08:12:10 +000016238 } else if (tg3_flag(tp, PCI_EXPRESS)) {
David S. Miller59e6b432005-05-18 22:50:10 -070016239 switch (cacheline_size) {
16240 case 16:
16241 case 32:
16242 case 64:
16243 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16244 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16245 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16246 break;
16247 }
16248 /* fallthrough */
16249 case 128:
16250 default:
16251 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16252 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16253 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070016254 }
David S. Miller59e6b432005-05-18 22:50:10 -070016255 } else {
16256 switch (cacheline_size) {
16257 case 16:
16258 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16259 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16260 DMA_RWCTRL_WRITE_BNDRY_16);
16261 break;
16262 }
16263 /* fallthrough */
16264 case 32:
16265 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16266 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16267 DMA_RWCTRL_WRITE_BNDRY_32);
16268 break;
16269 }
16270 /* fallthrough */
16271 case 64:
16272 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16273 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16274 DMA_RWCTRL_WRITE_BNDRY_64);
16275 break;
16276 }
16277 /* fallthrough */
16278 case 128:
16279 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16280 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16281 DMA_RWCTRL_WRITE_BNDRY_128);
16282 break;
16283 }
16284 /* fallthrough */
16285 case 256:
16286 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16287 DMA_RWCTRL_WRITE_BNDRY_256);
16288 break;
16289 case 512:
16290 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16291 DMA_RWCTRL_WRITE_BNDRY_512);
16292 break;
16293 case 1024:
16294 default:
16295 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16296 DMA_RWCTRL_WRITE_BNDRY_1024);
16297 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070016298 }
David S. Miller59e6b432005-05-18 22:50:10 -070016299 }
16300
16301out:
16302 return val;
16303}
16304
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016305static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16306 int size, int to_device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016307{
16308 struct tg3_internal_buffer_desc test_desc;
16309 u32 sram_dma_descs;
16310 int i, ret;
16311
16312 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16313
16314 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16315 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16316 tw32(RDMAC_STATUS, 0);
16317 tw32(WDMAC_STATUS, 0);
16318
16319 tw32(BUFMGR_MODE, 0);
16320 tw32(FTQ_RESET, 0);
16321
16322 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16323 test_desc.addr_lo = buf_dma & 0xffffffff;
16324 test_desc.nic_mbuf = 0x00002100;
16325 test_desc.len = size;
16326
16327 /*
16328 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16329 * the *second* time the tg3 driver was getting loaded after an
16330 * initial scan.
16331 *
16332 * Broadcom tells me:
16333 * ...the DMA engine is connected to the GRC block and a DMA
16334 * reset may affect the GRC block in some unpredictable way...
16335 * The behavior of resets to individual blocks has not been tested.
16336 *
16337 * Broadcom noted the GRC reset will also reset all sub-components.
16338 */
16339 if (to_device) {
16340 test_desc.cqid_sqid = (13 << 8) | 2;
16341
16342 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16343 udelay(40);
16344 } else {
16345 test_desc.cqid_sqid = (16 << 8) | 7;
16346
16347 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16348 udelay(40);
16349 }
16350 test_desc.flags = 0x00000005;
16351
16352 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16353 u32 val;
16354
16355 val = *(((u32 *)&test_desc) + i);
16356 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16357 sram_dma_descs + (i * sizeof(u32)));
16358 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16359 }
16360 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16361
Matt Carlson859a588792010-04-05 10:19:28 +000016362 if (to_device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016363 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
Matt Carlson859a588792010-04-05 10:19:28 +000016364 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070016365 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016366
16367 ret = -ENODEV;
16368 for (i = 0; i < 40; i++) {
16369 u32 val;
16370
16371 if (to_device)
16372 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16373 else
16374 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16375 if ((val & 0xffff) == sram_dma_descs) {
16376 ret = 0;
16377 break;
16378 }
16379
16380 udelay(100);
16381 }
16382
16383 return ret;
16384}
16385
David S. Millerded73402005-05-23 13:59:47 -070016386#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070016387
Matt Carlson41434702011-03-09 16:58:22 +000016388static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
Joe Perches895950c2010-12-21 02:16:08 -080016389 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16390 { },
16391};
16392
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016393static int tg3_test_dma(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016394{
16395 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070016396 u32 *buf, saved_dma_rwctrl;
Matt Carlsoncbf9ca62009-11-13 13:03:40 +000016397 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016398
Matt Carlson4bae65c2010-11-24 08:31:52 +000016399 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16400 &buf_dma, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016401 if (!buf) {
16402 ret = -ENOMEM;
16403 goto out_nofree;
16404 }
16405
16406 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16407 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16408
David S. Miller59e6b432005-05-18 22:50:10 -070016409 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016410
Joe Perches63c3a662011-04-26 08:12:10 +000016411 if (tg3_flag(tp, 57765_PLUS))
Matt Carlsoncbf9ca62009-11-13 13:03:40 +000016412 goto out;
16413
Joe Perches63c3a662011-04-26 08:12:10 +000016414 if (tg3_flag(tp, PCI_EXPRESS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070016415 /* DMA read watermark not used on PCIE */
16416 tp->dma_rwctrl |= 0x00180000;
Joe Perches63c3a662011-04-26 08:12:10 +000016417 } else if (!tg3_flag(tp, PCIX_MODE)) {
Joe Perches41535772013-02-16 11:20:04 +000016418 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16419 tg3_asic_rev(tp) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016420 tp->dma_rwctrl |= 0x003f0000;
16421 else
16422 tp->dma_rwctrl |= 0x003f000f;
16423 } else {
Joe Perches41535772013-02-16 11:20:04 +000016424 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16425 tg3_asic_rev(tp) == ASIC_REV_5704) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070016426 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080016427 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016428
Michael Chan4a29cc22006-03-19 13:21:12 -080016429 /* If the 5704 is behind the EPB bridge, we can
16430 * do the less restrictive ONE_DMA workaround for
16431 * better performance.
16432 */
Joe Perches63c3a662011-04-26 08:12:10 +000016433 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
Joe Perches41535772013-02-16 11:20:04 +000016434 tg3_asic_rev(tp) == ASIC_REV_5704)
Michael Chan4a29cc22006-03-19 13:21:12 -080016435 tp->dma_rwctrl |= 0x8000;
16436 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016437 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16438
Joe Perches41535772013-02-16 11:20:04 +000016439 if (tg3_asic_rev(tp) == ASIC_REV_5703)
Michael Chan49afdeb2007-02-13 12:17:03 -080016440 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070016441 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080016442 tp->dma_rwctrl |=
16443 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16444 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16445 (1 << 23);
Joe Perches41535772013-02-16 11:20:04 +000016446 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
Michael Chan4cf78e42005-07-25 12:29:19 -070016447 /* 5780 always in PCIX mode */
16448 tp->dma_rwctrl |= 0x00144000;
Joe Perches41535772013-02-16 11:20:04 +000016449 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
Michael Chana4e2b342005-10-26 15:46:52 -070016450 /* 5714 always in PCIX mode */
16451 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016452 } else {
16453 tp->dma_rwctrl |= 0x001b000f;
16454 }
16455 }
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +000016456 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16457 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016458
Joe Perches41535772013-02-16 11:20:04 +000016459 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16460 tg3_asic_rev(tp) == ASIC_REV_5704)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016461 tp->dma_rwctrl &= 0xfffffff0;
16462
Joe Perches41535772013-02-16 11:20:04 +000016463 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16464 tg3_asic_rev(tp) == ASIC_REV_5701) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070016465 /* Remove this if it causes problems for some boards. */
16466 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16467
16468 /* On 5700/5701 chips, we need to set this bit.
16469 * Otherwise the chip will issue cacheline transactions
16470 * to streamable DMA memory with not all the byte
16471 * enables turned on. This is an error on several
16472 * RISC PCI controllers, in particular sparc64.
16473 *
16474 * On 5703/5704 chips, this bit has been reassigned
16475 * a different meaning. In particular, it is used
16476 * on those chips to enable a PCI-X workaround.
16477 */
16478 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16479 }
16480
16481 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16482
16483#if 0
16484 /* Unneeded, already done by tg3_get_invariants. */
16485 tg3_switch_clocks(tp);
16486#endif
16487
Joe Perches41535772013-02-16 11:20:04 +000016488 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16489 tg3_asic_rev(tp) != ASIC_REV_5701)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016490 goto out;
16491
David S. Miller59e6b432005-05-18 22:50:10 -070016492 /* It is best to perform DMA test with maximum write burst size
16493 * to expose the 5700/5701 write DMA bug.
16494 */
16495 saved_dma_rwctrl = tp->dma_rwctrl;
16496 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16497 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16498
Linus Torvalds1da177e2005-04-16 15:20:36 -070016499 while (1) {
16500 u32 *p = buf, i;
16501
16502 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16503 p[i] = i;
16504
16505 /* Send the buffer to the chip. */
16506 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16507 if (ret) {
Matt Carlson2445e462010-04-05 10:19:21 +000016508 dev_err(&tp->pdev->dev,
16509 "%s: Buffer write failed. err = %d\n",
16510 __func__, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016511 break;
16512 }
16513
16514#if 0
16515 /* validate data reached card RAM correctly. */
16516 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16517 u32 val;
16518 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16519 if (le32_to_cpu(val) != p[i]) {
Matt Carlson2445e462010-04-05 10:19:21 +000016520 dev_err(&tp->pdev->dev,
16521 "%s: Buffer corrupted on device! "
16522 "(%d != %d)\n", __func__, val, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016523 /* ret = -ENODEV here? */
16524 }
16525 p[i] = 0;
16526 }
16527#endif
16528 /* Now read it back. */
16529 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16530 if (ret) {
Matt Carlson5129c3a2010-04-05 10:19:23 +000016531 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16532 "err = %d\n", __func__, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016533 break;
16534 }
16535
16536 /* Verify it. */
16537 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16538 if (p[i] == i)
16539 continue;
16540
David S. Miller59e6b432005-05-18 22:50:10 -070016541 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16542 DMA_RWCTRL_WRITE_BNDRY_16) {
16543 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016544 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16545 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16546 break;
16547 } else {
Matt Carlson2445e462010-04-05 10:19:21 +000016548 dev_err(&tp->pdev->dev,
16549 "%s: Buffer corrupted on read back! "
16550 "(%d != %d)\n", __func__, p[i], i);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016551 ret = -ENODEV;
16552 goto out;
16553 }
16554 }
16555
16556 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16557 /* Success. */
16558 ret = 0;
16559 break;
16560 }
16561 }
David S. Miller59e6b432005-05-18 22:50:10 -070016562 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16563 DMA_RWCTRL_WRITE_BNDRY_16) {
16564 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070016565 * now look for chipsets that are known to expose the
16566 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070016567 */
Matt Carlson41434702011-03-09 16:58:22 +000016568 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070016569 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16570 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
Matt Carlson859a588792010-04-05 10:19:28 +000016571 } else {
Michael Chan6d1cfba2005-06-08 14:13:14 -070016572 /* Safe to use the calculated DMA boundary. */
16573 tp->dma_rwctrl = saved_dma_rwctrl;
Matt Carlson859a588792010-04-05 10:19:28 +000016574 }
Michael Chan6d1cfba2005-06-08 14:13:14 -070016575
David S. Miller59e6b432005-05-18 22:50:10 -070016576 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016578
16579out:
Matt Carlson4bae65c2010-11-24 08:31:52 +000016580 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016581out_nofree:
16582 return ret;
16583}
16584
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016585static void tg3_init_bufmgr_config(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016586{
Joe Perches63c3a662011-04-26 08:12:10 +000016587 if (tg3_flag(tp, 57765_PLUS)) {
Matt Carlson666bc832010-01-20 16:58:03 +000016588 tp->bufmgr_config.mbuf_read_dma_low_water =
16589 DEFAULT_MB_RDMA_LOW_WATER_5705;
16590 tp->bufmgr_config.mbuf_mac_rx_low_water =
16591 DEFAULT_MB_MACRX_LOW_WATER_57765;
16592 tp->bufmgr_config.mbuf_high_water =
16593 DEFAULT_MB_HIGH_WATER_57765;
16594
16595 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16596 DEFAULT_MB_RDMA_LOW_WATER_5705;
16597 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16598 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16599 tp->bufmgr_config.mbuf_high_water_jumbo =
16600 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
Joe Perches63c3a662011-04-26 08:12:10 +000016601 } else if (tg3_flag(tp, 5705_PLUS)) {
Michael Chanfdfec1722005-07-25 12:31:48 -070016602 tp->bufmgr_config.mbuf_read_dma_low_water =
16603 DEFAULT_MB_RDMA_LOW_WATER_5705;
16604 tp->bufmgr_config.mbuf_mac_rx_low_water =
16605 DEFAULT_MB_MACRX_LOW_WATER_5705;
16606 tp->bufmgr_config.mbuf_high_water =
16607 DEFAULT_MB_HIGH_WATER_5705;
Joe Perches41535772013-02-16 11:20:04 +000016608 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
Michael Chanb5d37722006-09-27 16:06:21 -070016609 tp->bufmgr_config.mbuf_mac_rx_low_water =
16610 DEFAULT_MB_MACRX_LOW_WATER_5906;
16611 tp->bufmgr_config.mbuf_high_water =
16612 DEFAULT_MB_HIGH_WATER_5906;
16613 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016614
Michael Chanfdfec1722005-07-25 12:31:48 -070016615 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16616 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16617 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16618 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16619 tp->bufmgr_config.mbuf_high_water_jumbo =
16620 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16621 } else {
16622 tp->bufmgr_config.mbuf_read_dma_low_water =
16623 DEFAULT_MB_RDMA_LOW_WATER;
16624 tp->bufmgr_config.mbuf_mac_rx_low_water =
16625 DEFAULT_MB_MACRX_LOW_WATER;
16626 tp->bufmgr_config.mbuf_high_water =
16627 DEFAULT_MB_HIGH_WATER;
16628
16629 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16630 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16631 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16632 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16633 tp->bufmgr_config.mbuf_high_water_jumbo =
16634 DEFAULT_MB_HIGH_WATER_JUMBO;
16635 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016636
16637 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16638 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16639}
16640
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016641static char *tg3_phy_string(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070016642{
Matt Carlson79eb6902010-02-17 15:17:03 +000016643 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16644 case TG3_PHY_ID_BCM5400: return "5400";
16645 case TG3_PHY_ID_BCM5401: return "5401";
16646 case TG3_PHY_ID_BCM5411: return "5411";
16647 case TG3_PHY_ID_BCM5701: return "5701";
16648 case TG3_PHY_ID_BCM5703: return "5703";
16649 case TG3_PHY_ID_BCM5704: return "5704";
16650 case TG3_PHY_ID_BCM5705: return "5705";
16651 case TG3_PHY_ID_BCM5750: return "5750";
16652 case TG3_PHY_ID_BCM5752: return "5752";
16653 case TG3_PHY_ID_BCM5714: return "5714";
16654 case TG3_PHY_ID_BCM5780: return "5780";
16655 case TG3_PHY_ID_BCM5755: return "5755";
16656 case TG3_PHY_ID_BCM5787: return "5787";
16657 case TG3_PHY_ID_BCM5784: return "5784";
16658 case TG3_PHY_ID_BCM5756: return "5722/5756";
16659 case TG3_PHY_ID_BCM5906: return "5906";
16660 case TG3_PHY_ID_BCM5761: return "5761";
16661 case TG3_PHY_ID_BCM5718C: return "5718C";
16662 case TG3_PHY_ID_BCM5718S: return "5718S";
16663 case TG3_PHY_ID_BCM57765: return "57765";
Matt Carlson302b5002010-06-05 17:24:38 +000016664 case TG3_PHY_ID_BCM5719C: return "5719C";
Matt Carlson6418f2c2011-04-05 14:22:49 +000016665 case TG3_PHY_ID_BCM5720C: return "5720C";
Michael Chanc65a17f2013-01-06 12:51:07 +000016666 case TG3_PHY_ID_BCM5762: return "5762C";
Matt Carlson79eb6902010-02-17 15:17:03 +000016667 case TG3_PHY_ID_BCM8002: return "8002/serdes";
Linus Torvalds1da177e2005-04-16 15:20:36 -070016668 case 0: return "serdes";
16669 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070016670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016671}
16672
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016673static char *tg3_bus_string(struct tg3 *tp, char *str)
Michael Chanf9804dd2005-09-27 12:13:10 -070016674{
Joe Perches63c3a662011-04-26 08:12:10 +000016675 if (tg3_flag(tp, PCI_EXPRESS)) {
Michael Chanf9804dd2005-09-27 12:13:10 -070016676 strcpy(str, "PCI Express");
16677 return str;
Joe Perches63c3a662011-04-26 08:12:10 +000016678 } else if (tg3_flag(tp, PCIX_MODE)) {
Michael Chanf9804dd2005-09-27 12:13:10 -070016679 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16680
16681 strcpy(str, "PCIX:");
16682
16683 if ((clock_ctrl == 7) ||
16684 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16685 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16686 strcat(str, "133MHz");
16687 else if (clock_ctrl == 0)
16688 strcat(str, "33MHz");
16689 else if (clock_ctrl == 2)
16690 strcat(str, "50MHz");
16691 else if (clock_ctrl == 4)
16692 strcat(str, "66MHz");
16693 else if (clock_ctrl == 6)
16694 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070016695 } else {
16696 strcpy(str, "PCI:");
Joe Perches63c3a662011-04-26 08:12:10 +000016697 if (tg3_flag(tp, PCI_HIGH_SPEED))
Michael Chanf9804dd2005-09-27 12:13:10 -070016698 strcat(str, "66MHz");
16699 else
16700 strcat(str, "33MHz");
16701 }
Joe Perches63c3a662011-04-26 08:12:10 +000016702 if (tg3_flag(tp, PCI_32BIT))
Michael Chanf9804dd2005-09-27 12:13:10 -070016703 strcat(str, ":32-bit");
16704 else
16705 strcat(str, ":64-bit");
16706 return str;
16707}
16708
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016709static void tg3_init_coal(struct tg3 *tp)
David S. Miller15f98502005-05-18 22:49:26 -070016710{
16711 struct ethtool_coalesce *ec = &tp->coal;
16712
16713 memset(ec, 0, sizeof(*ec));
16714 ec->cmd = ETHTOOL_GCOALESCE;
16715 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16716 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16717 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16718 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16719 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16720 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16721 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16722 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16723 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16724
16725 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16726 HOSTCC_MODE_CLRTICK_TXBD)) {
16727 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16728 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16729 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16730 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16731 }
Michael Chand244c892005-07-05 14:42:33 -070016732
Joe Perches63c3a662011-04-26 08:12:10 +000016733 if (tg3_flag(tp, 5705_PLUS)) {
Michael Chand244c892005-07-05 14:42:33 -070016734 ec->rx_coalesce_usecs_irq = 0;
16735 ec->tx_coalesce_usecs_irq = 0;
16736 ec->stats_block_coalesce_usecs = 0;
16737 }
David S. Miller15f98502005-05-18 22:49:26 -070016738}
16739
Bill Pemberton229b1ad2012-12-03 09:22:59 -050016740static int tg3_init_one(struct pci_dev *pdev,
Linus Torvalds1da177e2005-04-16 15:20:36 -070016741 const struct pci_device_id *ent)
16742{
Linus Torvalds1da177e2005-04-16 15:20:36 -070016743 struct net_device *dev;
16744 struct tg3 *tp;
Matt Carlson646c9ed2009-09-01 12:58:41 +000016745 int i, err, pm_cap;
16746 u32 sndmbx, rcvmbx, intmbx;
Michael Chanf9804dd2005-09-27 12:13:10 -070016747 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080016748 u64 dma_mask, persist_dma_mask;
Michał Mirosławc8f44af2011-11-15 15:29:55 +000016749 netdev_features_t features = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016750
Joe Perches05dbe002010-02-17 19:44:19 +000016751 printk_once(KERN_INFO "%s\n", version);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016752
16753 err = pci_enable_device(pdev);
16754 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000016755 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016756 return err;
16757 }
16758
Linus Torvalds1da177e2005-04-16 15:20:36 -070016759 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16760 if (err) {
Matt Carlson2445e462010-04-05 10:19:21 +000016761 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016762 goto err_out_disable_pdev;
16763 }
16764
16765 pci_set_master(pdev);
16766
16767 /* Find power-management capability. */
16768 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16769 if (pm_cap == 0) {
Matt Carlson2445e462010-04-05 10:19:21 +000016770 dev_err(&pdev->dev,
16771 "Cannot find Power Management capability, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016772 err = -EIO;
16773 goto err_out_free_res;
16774 }
16775
Matt Carlson16821282011-07-13 09:27:28 +000016776 err = pci_set_power_state(pdev, PCI_D0);
16777 if (err) {
16778 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16779 goto err_out_free_res;
16780 }
16781
Matt Carlsonfe5f5782009-09-01 13:09:39 +000016782 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016783 if (!dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070016784 err = -ENOMEM;
Matt Carlson16821282011-07-13 09:27:28 +000016785 goto err_out_power_down;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016786 }
16787
Linus Torvalds1da177e2005-04-16 15:20:36 -070016788 SET_NETDEV_DEV(dev, &pdev->dev);
16789
Linus Torvalds1da177e2005-04-16 15:20:36 -070016790 tp = netdev_priv(dev);
16791 tp->pdev = pdev;
16792 tp->dev = dev;
16793 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016794 tp->rx_mode = TG3_DEF_RX_MODE;
16795 tp->tx_mode = TG3_DEF_TX_MODE;
Nithin Nayak Sujir9c13cb82013-01-14 17:10:59 +000016796 tp->irq_sync = 1;
Matt Carlson8ef21422008-05-02 16:47:53 -070016797
Linus Torvalds1da177e2005-04-16 15:20:36 -070016798 if (tg3_debug > 0)
16799 tp->msg_enable = tg3_debug;
16800 else
16801 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16802
Hauke Mehrtens7e6c63f2013-02-07 05:37:39 +000016803 if (pdev_is_ssb_gige_core(pdev)) {
16804 tg3_flag_set(tp, IS_SSB_CORE);
16805 if (ssb_gige_must_flush_posted_writes(pdev))
16806 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16807 if (ssb_gige_one_dma_at_once(pdev))
16808 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16809 if (ssb_gige_have_roboswitch(pdev))
16810 tg3_flag_set(tp, ROBOSWITCH);
16811 if (ssb_gige_is_rgmii(pdev))
16812 tg3_flag_set(tp, RGMII_MODE);
16813 }
16814
Linus Torvalds1da177e2005-04-16 15:20:36 -070016815 /* The word/byte swap controls here control register access byte
16816 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16817 * setting below.
16818 */
16819 tp->misc_host_ctrl =
16820 MISC_HOST_CTRL_MASK_PCI_INT |
16821 MISC_HOST_CTRL_WORD_SWAP |
16822 MISC_HOST_CTRL_INDIR_ACCESS |
16823 MISC_HOST_CTRL_PCISTATE_RW;
16824
16825 /* The NONFRM (non-frame) byte/word swap controls take effect
16826 * on descriptor entries, anything which isn't packet data.
16827 *
16828 * The StrongARM chips on the board (one for tx, one for rx)
16829 * are running in big-endian mode.
16830 */
16831 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16832 GRC_MODE_WSWAP_NONFRM_DATA);
16833#ifdef __BIG_ENDIAN
16834 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16835#endif
16836 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016837 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000016838 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016839
Matt Carlsond5fe4882008-11-21 17:20:32 -080016840 tp->regs = pci_ioremap_bar(pdev, BAR_0);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010016841 if (!tp->regs) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016842 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070016843 err = -ENOMEM;
16844 goto err_out_free_dev;
16845 }
16846
Matt Carlsonc9cab242011-07-13 09:27:27 +000016847 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16848 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16849 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16850 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16851 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
Michael Chan79d49692012-11-05 14:26:29 +000016852 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
Matt Carlsonc9cab242011-07-13 09:27:27 +000016853 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16854 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
Michael Chanc65a17f2013-01-06 12:51:07 +000016855 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16856 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16857 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16858 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
Matt Carlsonc9cab242011-07-13 09:27:27 +000016859 tg3_flag_set(tp, ENABLE_APE);
16860 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16861 if (!tp->aperegs) {
16862 dev_err(&pdev->dev,
16863 "Cannot map APE registers, aborting\n");
16864 err = -ENOMEM;
16865 goto err_out_iounmap;
16866 }
16867 }
16868
Linus Torvalds1da177e2005-04-16 15:20:36 -070016869 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16870 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016871
Linus Torvalds1da177e2005-04-16 15:20:36 -070016872 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016873 dev->watchdog_timeo = TG3_TX_TIMEOUT;
Matt Carlson2ffcc982011-05-19 12:12:44 +000016874 dev->netdev_ops = &tg3_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016875 dev->irq = pdev->irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016876
Nithin Nayak Sujir3d567e02012-11-14 14:44:26 +000016877 err = tg3_get_invariants(tp, ent);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016878 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016879 dev_err(&pdev->dev,
16880 "Problem fetching invariants of chip, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016881 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070016882 }
16883
Michael Chan4a29cc22006-03-19 13:21:12 -080016884 /* The EPB bridge inside 5714, 5715, and 5780 and any
16885 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080016886 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16887 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16888 * do DMA address check in tg3_start_xmit().
16889 */
Joe Perches63c3a662011-04-26 08:12:10 +000016890 if (tg3_flag(tp, IS_5788))
Yang Hongyang284901a2009-04-06 19:01:15 -070016891 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
Joe Perches63c3a662011-04-26 08:12:10 +000016892 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
Yang Hongyang50cf1562009-04-06 19:01:14 -070016893 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
Michael Chan72f2afb2006-03-06 19:28:35 -080016894#ifdef CONFIG_HIGHMEM
Yang Hongyang6a355282009-04-06 19:01:13 -070016895 dma_mask = DMA_BIT_MASK(64);
Michael Chan72f2afb2006-03-06 19:28:35 -080016896#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080016897 } else
Yang Hongyang6a355282009-04-06 19:01:13 -070016898 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
Michael Chan72f2afb2006-03-06 19:28:35 -080016899
16900 /* Configure DMA attributes. */
Yang Hongyang284901a2009-04-06 19:01:15 -070016901 if (dma_mask > DMA_BIT_MASK(32)) {
Michael Chan72f2afb2006-03-06 19:28:35 -080016902 err = pci_set_dma_mask(pdev, dma_mask);
16903 if (!err) {
Matt Carlson0da06062011-05-19 12:12:53 +000016904 features |= NETIF_F_HIGHDMA;
Michael Chan72f2afb2006-03-06 19:28:35 -080016905 err = pci_set_consistent_dma_mask(pdev,
16906 persist_dma_mask);
16907 if (err < 0) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016908 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16909 "DMA for consistent allocations\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016910 goto err_out_apeunmap;
Michael Chan72f2afb2006-03-06 19:28:35 -080016911 }
16912 }
16913 }
Yang Hongyang284901a2009-04-06 19:01:15 -070016914 if (err || dma_mask == DMA_BIT_MASK(32)) {
16915 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Michael Chan72f2afb2006-03-06 19:28:35 -080016916 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016917 dev_err(&pdev->dev,
16918 "No usable DMA configuration, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016919 goto err_out_apeunmap;
Michael Chan72f2afb2006-03-06 19:28:35 -080016920 }
16921 }
16922
Michael Chanfdfec1722005-07-25 12:31:48 -070016923 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016924
Matt Carlson0da06062011-05-19 12:12:53 +000016925 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16926
16927 /* 5700 B0 chips do not support checksumming correctly due
16928 * to hardware bugs.
16929 */
Joe Perches41535772013-02-16 11:20:04 +000016930 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
Matt Carlson0da06062011-05-19 12:12:53 +000016931 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16932
16933 if (tg3_flag(tp, 5755_PLUS))
16934 features |= NETIF_F_IPV6_CSUM;
16935 }
16936
Michael Chan4e3a7aa2006-03-20 17:47:44 -080016937 /* TSO is on by default on chips that support hardware TSO.
16938 * Firmware TSO on older chips gives lower performance, so it
16939 * is off by default, but can be enabled using ethtool.
16940 */
Joe Perches63c3a662011-04-26 08:12:10 +000016941 if ((tg3_flag(tp, HW_TSO_1) ||
16942 tg3_flag(tp, HW_TSO_2) ||
16943 tg3_flag(tp, HW_TSO_3)) &&
Matt Carlson0da06062011-05-19 12:12:53 +000016944 (features & NETIF_F_IP_CSUM))
16945 features |= NETIF_F_TSO;
Joe Perches63c3a662011-04-26 08:12:10 +000016946 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
Matt Carlson0da06062011-05-19 12:12:53 +000016947 if (features & NETIF_F_IPV6_CSUM)
16948 features |= NETIF_F_TSO6;
Joe Perches63c3a662011-04-26 08:12:10 +000016949 if (tg3_flag(tp, HW_TSO_3) ||
Joe Perches41535772013-02-16 11:20:04 +000016950 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16951 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16952 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16953 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16954 tg3_asic_rev(tp) == ASIC_REV_57780)
Matt Carlson0da06062011-05-19 12:12:53 +000016955 features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070016956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070016957
Matt Carlsond542fe22011-05-19 16:02:43 +000016958 dev->features |= features;
16959 dev->vlan_features |= features;
16960
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016961 /*
16962 * Add loopback capability only for a subset of devices that support
16963 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16964 * loopback for the remaining devices.
16965 */
Joe Perches41535772013-02-16 11:20:04 +000016966 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016967 !tg3_flag(tp, CPMU_PRESENT))
16968 /* Add the loopback capability */
Matt Carlson0da06062011-05-19 12:12:53 +000016969 features |= NETIF_F_LOOPBACK;
16970
Matt Carlson0da06062011-05-19 12:12:53 +000016971 dev->hw_features |= features;
Mahesh Bandewar06c03c02011-05-08 06:51:48 +000016972
Joe Perches41535772013-02-16 11:20:04 +000016973 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
Joe Perches63c3a662011-04-26 08:12:10 +000016974 !tg3_flag(tp, TSO_CAPABLE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070016975 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
Joe Perches63c3a662011-04-26 08:12:10 +000016976 tg3_flag_set(tp, MAX_RXPEND_64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070016977 tp->rx_pending = 63;
16978 }
16979
Linus Torvalds1da177e2005-04-16 15:20:36 -070016980 err = tg3_get_device_address(tp);
16981 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000016982 dev_err(&pdev->dev,
16983 "Could not obtain valid ethernet address, aborting\n");
Matt Carlsonc9cab242011-07-13 09:27:27 +000016984 goto err_out_apeunmap;
Matt Carlson0d3031d2007-10-10 18:02:43 -070016985 }
16986
Matt Carlsonc88864d2007-11-12 21:07:01 -080016987 /*
16988 * Reset chip in case UNDI or EFI driver did not shutdown
16989 * DMA self test will enable WDMAC and we'll see (spurious)
16990 * pending DMA on the PCI bus at that point.
16991 */
16992 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16993 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16994 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16995 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16996 }
16997
16998 err = tg3_test_dma(tp);
16999 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000017000 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
Matt Carlsonc88864d2007-11-12 21:07:01 -080017001 goto err_out_apeunmap;
17002 }
17003
Matt Carlson78f90dc2009-11-13 13:03:42 +000017004 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17005 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17006 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
Matt Carlson6fd45cb2010-09-15 08:59:57 +000017007 for (i = 0; i < tp->irq_max; i++) {
Matt Carlson78f90dc2009-11-13 13:03:42 +000017008 struct tg3_napi *tnapi = &tp->napi[i];
17009
17010 tnapi->tp = tp;
17011 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17012
17013 tnapi->int_mbox = intmbx;
Matt Carlson93a700a2011-08-31 11:44:54 +000017014 if (i <= 4)
Matt Carlson78f90dc2009-11-13 13:03:42 +000017015 intmbx += 0x8;
17016 else
17017 intmbx += 0x4;
17018
17019 tnapi->consmbox = rcvmbx;
17020 tnapi->prodmbox = sndmbx;
17021
Matt Carlson66cfd1b2010-09-30 10:34:30 +000017022 if (i)
Matt Carlson78f90dc2009-11-13 13:03:42 +000017023 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
Matt Carlson66cfd1b2010-09-30 10:34:30 +000017024 else
Matt Carlson78f90dc2009-11-13 13:03:42 +000017025 tnapi->coal_now = HOSTCC_MODE_NOW;
Matt Carlson78f90dc2009-11-13 13:03:42 +000017026
Joe Perches63c3a662011-04-26 08:12:10 +000017027 if (!tg3_flag(tp, SUPPORT_MSIX))
Matt Carlson78f90dc2009-11-13 13:03:42 +000017028 break;
17029
17030 /*
17031 * If we support MSIX, we'll be using RSS. If we're using
17032 * RSS, the first vector only handles link interrupts and the
17033 * remaining vectors handle rx and tx interrupts. Reuse the
17034 * mailbox values for the next iteration. The values we setup
17035 * above are still useful for the single vectored mode.
17036 */
17037 if (!i)
17038 continue;
17039
17040 rcvmbx += 0x8;
17041
17042 if (sndmbx & 0x4)
17043 sndmbx -= 0x4;
17044 else
17045 sndmbx += 0xc;
17046 }
17047
Matt Carlsonc88864d2007-11-12 21:07:01 -080017048 tg3_init_coal(tp);
17049
Michael Chanc49a1562006-12-17 17:07:29 -080017050 pci_set_drvdata(pdev, dev);
17051
Joe Perches41535772013-02-16 11:20:04 +000017052 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17053 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17054 tg3_asic_rev(tp) == ASIC_REV_5762)
Matt Carlsonfb4ce8a2012-12-03 19:37:00 +000017055 tg3_flag_set(tp, PTP_CAPABLE);
17056
Matt Carlsoncd0d7222011-07-13 09:27:33 +000017057 if (tg3_flag(tp, 5717_PLUS)) {
17058 /* Resume a low-power mode */
17059 tg3_frob_aux_power(tp, false);
17060 }
17061
Matt Carlson21f76382012-02-22 12:35:21 +000017062 tg3_timer_init(tp);
17063
Michael Chan402e1392013-02-14 12:13:41 +000017064 tg3_carrier_off(tp);
17065
Linus Torvalds1da177e2005-04-16 15:20:36 -070017066 err = register_netdev(dev);
17067 if (err) {
Matt Carlsonab96b242010-04-05 10:19:22 +000017068 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070017069 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070017070 }
17071
Joe Perches05dbe002010-02-17 19:44:19 +000017072 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17073 tp->board_part_number,
Joe Perches41535772013-02-16 11:20:04 +000017074 tg3_chip_rev_id(tp),
Joe Perches05dbe002010-02-17 19:44:19 +000017075 tg3_bus_string(tp, str),
17076 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017077
Matt Carlsonf07e9af2010-08-02 11:26:07 +000017078 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
Matt Carlson3f0e3ad2009-11-02 14:24:36 +000017079 struct phy_device *phydev;
17080 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
Matt Carlson5129c3a2010-04-05 10:19:23 +000017081 netdev_info(dev,
17082 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
Joe Perches05dbe002010-02-17 19:44:19 +000017083 phydev->drv->name, dev_name(&phydev->dev));
Matt Carlsonf07e9af2010-08-02 11:26:07 +000017084 } else {
17085 char *ethtype;
17086
17087 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17088 ethtype = "10/100Base-TX";
17089 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17090 ethtype = "1000Base-SX";
17091 else
17092 ethtype = "10/100/1000Base-T";
17093
Matt Carlson5129c3a2010-04-05 10:19:23 +000017094 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
Matt Carlson47007832011-04-20 07:57:43 +000017095 "(WireSpeed[%d], EEE[%d])\n",
17096 tg3_phy_string(tp), ethtype,
17097 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17098 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
Matt Carlsonf07e9af2010-08-02 11:26:07 +000017099 }
Matt Carlsondf59c942008-11-03 16:52:56 -080017100
Joe Perches05dbe002010-02-17 19:44:19 +000017101 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Michał Mirosławdc668912011-04-07 03:35:07 +000017102 (dev->features & NETIF_F_RXCSUM) != 0,
Joe Perches63c3a662011-04-26 08:12:10 +000017103 tg3_flag(tp, USE_LINKCHG_REG) != 0,
Matt Carlsonf07e9af2010-08-02 11:26:07 +000017104 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
Joe Perches63c3a662011-04-26 08:12:10 +000017105 tg3_flag(tp, ENABLE_ASF) != 0,
17106 tg3_flag(tp, TSO_CAPABLE) != 0);
Joe Perches05dbe002010-02-17 19:44:19 +000017107 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17108 tp->dma_rwctrl,
17109 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17110 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017111
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017112 pci_save_state(pdev);
17113
Linus Torvalds1da177e2005-04-16 15:20:36 -070017114 return 0;
17115
Matt Carlson0d3031d2007-10-10 18:02:43 -070017116err_out_apeunmap:
17117 if (tp->aperegs) {
17118 iounmap(tp->aperegs);
17119 tp->aperegs = NULL;
17120 }
17121
Linus Torvalds1da177e2005-04-16 15:20:36 -070017122err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070017123 if (tp->regs) {
17124 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070017125 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070017126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070017127
17128err_out_free_dev:
17129 free_netdev(dev);
17130
Matt Carlson16821282011-07-13 09:27:28 +000017131err_out_power_down:
17132 pci_set_power_state(pdev, PCI_D3hot);
17133
Linus Torvalds1da177e2005-04-16 15:20:36 -070017134err_out_free_res:
17135 pci_release_regions(pdev);
17136
17137err_out_disable_pdev:
17138 pci_disable_device(pdev);
17139 pci_set_drvdata(pdev, NULL);
17140 return err;
17141}
17142
Bill Pemberton229b1ad2012-12-03 09:22:59 -050017143static void tg3_remove_one(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070017144{
17145 struct net_device *dev = pci_get_drvdata(pdev);
17146
17147 if (dev) {
17148 struct tg3 *tp = netdev_priv(dev);
17149
Jesper Juhle3c55302012-04-09 22:50:15 +020017150 release_firmware(tp->fw);
Jaswinder Singh Rajput077f8492009-01-04 16:11:25 -080017151
Matt Carlsondb219972011-11-04 09:15:03 +000017152 tg3_reset_task_cancel(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070017153
David S. Miller1805b2f2011-10-24 18:18:09 -040017154 if (tg3_flag(tp, USE_PHYLIB)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017155 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070017156 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017157 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070017158
Linus Torvalds1da177e2005-04-16 15:20:36 -070017159 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070017160 if (tp->aperegs) {
17161 iounmap(tp->aperegs);
17162 tp->aperegs = NULL;
17163 }
Michael Chan68929142005-08-09 20:17:14 -070017164 if (tp->regs) {
17165 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070017166 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070017167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070017168 free_netdev(dev);
17169 pci_release_regions(pdev);
17170 pci_disable_device(pdev);
17171 pci_set_drvdata(pdev, NULL);
17172 }
17173}
17174
Eric Dumazetaa6027c2011-01-01 05:22:46 +000017175#ifdef CONFIG_PM_SLEEP
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000017176static int tg3_suspend(struct device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070017177{
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000017178 struct pci_dev *pdev = to_pci_dev(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017179 struct net_device *dev = pci_get_drvdata(pdev);
17180 struct tg3 *tp = netdev_priv(dev);
17181 int err;
17182
17183 if (!netif_running(dev))
17184 return 0;
17185
Matt Carlsondb219972011-11-04 09:15:03 +000017186 tg3_reset_task_cancel(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017187 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017188 tg3_netif_stop(tp);
17189
Matt Carlson21f76382012-02-22 12:35:21 +000017190 tg3_timer_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017191
David S. Millerf47c11e2005-06-24 20:18:35 -070017192 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017193 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070017194 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017195
17196 netif_device_detach(dev);
17197
David S. Millerf47c11e2005-06-24 20:18:35 -070017198 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070017199 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Joe Perches63c3a662011-04-26 08:12:10 +000017200 tg3_flag_clear(tp, INIT_COMPLETE);
David S. Millerf47c11e2005-06-24 20:18:35 -070017201 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017202
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000017203 err = tg3_power_down_prepare(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017204 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017205 int err2;
17206
David S. Millerf47c11e2005-06-24 20:18:35 -070017207 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017208
Joe Perches63c3a662011-04-26 08:12:10 +000017209 tg3_flag_set(tp, INIT_COMPLETE);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017210 err2 = tg3_restart_hw(tp, 1);
17211 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070017212 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070017213
Matt Carlson21f76382012-02-22 12:35:21 +000017214 tg3_timer_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017215
17216 netif_device_attach(dev);
17217 tg3_netif_start(tp);
17218
Michael Chanb9ec6c12006-07-25 16:37:27 -070017219out:
David S. Millerf47c11e2005-06-24 20:18:35 -070017220 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017221
17222 if (!err2)
17223 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017224 }
17225
17226 return err;
17227}
17228
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000017229static int tg3_resume(struct device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070017230{
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000017231 struct pci_dev *pdev = to_pci_dev(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017232 struct net_device *dev = pci_get_drvdata(pdev);
17233 struct tg3 *tp = netdev_priv(dev);
17234 int err;
17235
17236 if (!netif_running(dev))
17237 return 0;
17238
Linus Torvalds1da177e2005-04-16 15:20:36 -070017239 netif_device_attach(dev);
17240
David S. Millerf47c11e2005-06-24 20:18:35 -070017241 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017242
Joe Perches63c3a662011-04-26 08:12:10 +000017243 tg3_flag_set(tp, INIT_COMPLETE);
Michael Chanb9ec6c12006-07-25 16:37:27 -070017244 err = tg3_restart_hw(tp, 1);
17245 if (err)
17246 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070017247
Matt Carlson21f76382012-02-22 12:35:21 +000017248 tg3_timer_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017249
Linus Torvalds1da177e2005-04-16 15:20:36 -070017250 tg3_netif_start(tp);
17251
Michael Chanb9ec6c12006-07-25 16:37:27 -070017252out:
David S. Millerf47c11e2005-06-24 20:18:35 -070017253 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017254
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070017255 if (!err)
17256 tg3_phy_start(tp);
17257
Michael Chanb9ec6c12006-07-25 16:37:27 -070017258 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070017259}
17260
/* Wire the suspend/resume callbacks into a dev_pm_ops structure; when
 * CONFIG_PM_SLEEP is disabled the driver registers no PM ops at all.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
Rafael J. Wysockic866b7e2010-12-25 12:56:23 +000017269
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017270/**
17271 * tg3_io_error_detected - called when PCI error is detected
17272 * @pdev: Pointer to PCI device
17273 * @state: The current pci connection state
17274 *
17275 * This function is called after a PCI bus error affecting
17276 * this device has been detected.
17277 */
17278static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17279 pci_channel_state_t state)
17280{
17281 struct net_device *netdev = pci_get_drvdata(pdev);
17282 struct tg3 *tp = netdev_priv(netdev);
17283 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17284
17285 netdev_info(netdev, "PCI I/O error detected\n");
17286
17287 rtnl_lock();
17288
17289 if (!netif_running(netdev))
17290 goto done;
17291
17292 tg3_phy_stop(tp);
17293
17294 tg3_netif_stop(tp);
17295
Matt Carlson21f76382012-02-22 12:35:21 +000017296 tg3_timer_stop(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017297
17298 /* Want to make sure that the reset task doesn't run */
Matt Carlsondb219972011-11-04 09:15:03 +000017299 tg3_reset_task_cancel(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017300
17301 netif_device_detach(netdev);
17302
17303 /* Clean up software state, even if MMIO is blocked */
17304 tg3_full_lock(tp, 0);
17305 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17306 tg3_full_unlock(tp);
17307
17308done:
17309 if (state == pci_channel_io_perm_failure)
17310 err = PCI_ERS_RESULT_DISCONNECT;
17311 else
17312 pci_disable_device(pdev);
17313
17314 rtnl_unlock();
17315
17316 return err;
17317}
17318
17319/**
17320 * tg3_io_slot_reset - called after the pci bus has been reset.
17321 * @pdev: Pointer to PCI device
17322 *
17323 * Restart the card from scratch, as if from a cold-boot.
17324 * At this point, the card has exprienced a hard reset,
17325 * followed by fixups by BIOS, and has its config space
17326 * set up identically to what it was at cold boot.
17327 */
17328static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17329{
17330 struct net_device *netdev = pci_get_drvdata(pdev);
17331 struct tg3 *tp = netdev_priv(netdev);
17332 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17333 int err;
17334
17335 rtnl_lock();
17336
17337 if (pci_enable_device(pdev)) {
17338 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17339 goto done;
17340 }
17341
17342 pci_set_master(pdev);
17343 pci_restore_state(pdev);
17344 pci_save_state(pdev);
17345
17346 if (!netif_running(netdev)) {
17347 rc = PCI_ERS_RESULT_RECOVERED;
17348 goto done;
17349 }
17350
17351 err = tg3_power_up(tp);
Matt Carlsonbed98292011-07-13 09:27:29 +000017352 if (err)
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017353 goto done;
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017354
17355 rc = PCI_ERS_RESULT_RECOVERED;
17356
17357done:
17358 rtnl_unlock();
17359
17360 return rc;
17361}
17362
17363/**
17364 * tg3_io_resume - called when traffic can start flowing again.
17365 * @pdev: Pointer to PCI device
17366 *
17367 * This callback is called when the error recovery driver tells
17368 * us that its OK to resume normal operation.
17369 */
17370static void tg3_io_resume(struct pci_dev *pdev)
17371{
17372 struct net_device *netdev = pci_get_drvdata(pdev);
17373 struct tg3 *tp = netdev_priv(netdev);
17374 int err;
17375
17376 rtnl_lock();
17377
17378 if (!netif_running(netdev))
17379 goto done;
17380
17381 tg3_full_lock(tp, 0);
Joe Perches63c3a662011-04-26 08:12:10 +000017382 tg3_flag_set(tp, INIT_COMPLETE);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017383 err = tg3_restart_hw(tp, 1);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017384 if (err) {
Nithin Nayak Sujir35763062012-12-03 19:36:56 +000017385 tg3_full_unlock(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017386 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17387 goto done;
17388 }
17389
17390 netif_device_attach(netdev);
17391
Matt Carlson21f76382012-02-22 12:35:21 +000017392 tg3_timer_start(tp);
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017393
17394 tg3_netif_start(tp);
17395
Nithin Nayak Sujir35763062012-12-03 19:36:56 +000017396 tg3_full_unlock(tp);
17397
Matt Carlsonb45aa2f2011-04-25 12:42:48 +000017398 tg3_phy_start(tp);
17399
17400done:
17401 rtnl_unlock();
17402}
17403
/* PCI AER (error recovery) callbacks for this driver. */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,	/* bus error seen */
	.slot_reset	= tg3_io_slot_reset,		/* after slot reset */
	.resume		= tg3_io_resume			/* traffic may resume */
};
17409
/* Top-level PCI driver description registered with the PCI core. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,		/* devices we bind to */
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,	/* AER recovery hooks */
	.driver.pm	= TG3_PM_OPS,		/* NULL without CONFIG_PM_SLEEP */
};
17418
/* Module entry point: register the tg3 driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
17423
/* Module exit point: unregister the driver; removes all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
17428
/* Hook the init/exit routines into module load and unload. */
module_init(tg3_init);
module_exit(tg3_cleanup);