Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001/*
2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3 * Ethernet adapters. Based on earlier sk98lin, e100 and
4 * FreeBSD if_sk drivers.
5 *
6 * This driver intentionally does not support all the features
7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels.
9 *
Stephen Hemminger747802a2005-06-27 11:33:16 -070010 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040011 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/pci.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/delay.h>
38#include <linux/crc32.h>
Al Viro40754002005-04-03 09:15:52 +010039#include <linux/dma-mapping.h>
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040040#include <asm/irq.h>
41
42#include "skge.h"
43
44#define DRV_NAME "skge"
Stephen Hemminger54cfb5a2005-08-16 14:01:05 -070045#define DRV_VERSION "0.9"
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040046#define PFX DRV_NAME " "
47
48#define DEFAULT_TX_RING_SIZE 128
49#define DEFAULT_RX_RING_SIZE 512
50#define MAX_TX_RING_SIZE 1024
51#define MAX_RX_RING_SIZE 4096
Stephen Hemminger19a33d42005-06-27 11:33:15 -070052#define RX_COPY_THRESHOLD 128
53#define RX_BUF_SIZE 1536
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040054#define PHY_RETRIES 1000
55#define ETH_JUMBO_MTU 9000
56#define TX_WATCHDOG (5 * HZ)
57#define NAPI_WEIGHT 64
Stephen Hemminger6abebb52005-07-22 16:26:10 -070058#define BLINK_MS 250
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040059
60MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
61MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
62MODULE_LICENSE("GPL");
63MODULE_VERSION(DRV_VERSION);
64
65static const u32 default_msg
66 = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK
67 | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN;
68
69static int debug = -1; /* defaults above */
70module_param(debug, int, 0);
71MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
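/*
 * Usage sketch (illustrative): load with the maximum message level,
 *	modprobe skge debug=16
 * Leaving debug at -1 keeps the default_msg mask defined above.
 */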
72
73static const struct pci_device_id skge_id_table[] = {
Stephen Hemminger275834d2005-06-27 11:33:03 -070074 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
75 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
76 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
77 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
Stephen Hemminger275834d2005-06-27 11:33:03 -070078 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
79 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
81 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
Stephen Hemminger275834d2005-06-27 11:33:03 -070082 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
Francois Romieu86f0cd52005-08-24 01:14:23 +020083 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040084 { 0 }
85};
86MODULE_DEVICE_TABLE(pci, skge_id_table);
87
88static int skge_up(struct net_device *dev);
89static int skge_down(struct net_device *dev);
90static void skge_tx_clean(struct skge_port *skge);
Stephen Hemminger6b0c1482005-06-27 11:33:04 -070091static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
92static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -040093static void genesis_get_stats(struct skge_port *skge, u64 *data);
94static void yukon_get_stats(struct skge_port *skge, u64 *data);
95static void yukon_init(struct skge_hw *hw, int port);
96static void yukon_reset(struct skge_hw *hw, int port);
97static void genesis_mac_init(struct skge_hw *hw, int port);
98static void genesis_reset(struct skge_hw *hw, int port);
Stephen Hemminger45bada62005-06-27 11:33:12 -070099static void genesis_link_up(struct skge_port *skge);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400100
Stephen Hemminger7e676d92005-06-27 11:33:13 -0700101/* Avoid conditionals by using array */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400102static const int txqaddr[] = { Q_XA1, Q_XA2 };
103static const int rxqaddr[] = { Q_R1, Q_R2 };
104static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
Stephen Hemminger7e676d92005-06-27 11:33:13 -0700106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400107
108/* Don't need to look at whole 16K.
109 * last interesting register is descriptor poll timer.
110 */
111#define SKGE_REGS_LEN (29*128)
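/*
 * Layout note: the I/O space is treated as 128-byte banks, so the dump
 * covers 29 * 128 = 3712 bytes; skge_get_regs() below copies only the
 * banks marked readable in its bankmap and zero-fills the rest.
 */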
112
113static int skge_get_regs_len(struct net_device *dev)
114{
115 return SKGE_REGS_LEN;
116}
117
118/*
119 * Returns copy of control register region
120 * I/O region is divided into banks and certain regions are unreadable
121 */
122static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
123 void *p)
124{
125 const struct skge_port *skge = netdev_priv(dev);
126 unsigned long offs;
127 const void __iomem *io = skge->hw->regs;
128 static const unsigned long bankmap
129 = (1<<0) | (1<<2) | (1<<8) | (1<<9)
130 | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
131 | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
132 | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
133
134 regs->version = 1;
135 for (offs = 0; offs < regs->len; offs += 128) {
136 u32 len = min_t(u32, 128, regs->len - offs);
137
138 if (bankmap & (1<<(offs/128)))
139 memcpy_fromio(p + offs, io + offs, len);
140 else
141 memset(p + offs, 0, len);
142 }
143}
144
 145/* Wake-on-LAN only supported on Yukon chips with rev 1 or above */
146static int wol_supported(const struct skge_hw *hw)
147{
148 return !((hw->chip_id == CHIP_ID_GENESIS ||
Stephen Hemminger981d0372005-06-27 11:33:06 -0700149 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400150}
151
152static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
153{
154 struct skge_port *skge = netdev_priv(dev);
155
156 wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
157 wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
158}
159
160static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
161{
162 struct skge_port *skge = netdev_priv(dev);
163 struct skge_hw *hw = skge->hw;
164
Stephen Hemminger95566062005-06-27 11:33:02 -0700165 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400166 return -EOPNOTSUPP;
167
168 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
169 return -EOPNOTSUPP;
170
171 skge->wol = wol->wolopts == WAKE_MAGIC;
172
173 if (skge->wol) {
174 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
175
176 skge_write16(hw, WOL_CTRL_STAT,
177 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
178 WOL_CTL_ENA_MAGIC_PKT_UNIT);
179 } else
180 skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
181
182 return 0;
183}
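/*
 * Usage sketch (interface name is illustrative):
 *	ethtool -s eth0 wol g	enable magic-packet wake-up
 *	ethtool -s eth0 wol d	disable Wake-on-LAN
 * Any option other than magic packet (or none) is rejected above.
 */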
184
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700185/* Determine supported/advertised modes based on hardware.
 186 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
187 */
188static u32 skge_supported_modes(const struct skge_hw *hw)
189{
190 u32 supported;
191
Stephen Hemminger5e1705d2005-08-16 14:00:58 -0700192 if (hw->copper) {
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700193 supported = SUPPORTED_10baseT_Half
194 | SUPPORTED_10baseT_Full
195 | SUPPORTED_100baseT_Half
196 | SUPPORTED_100baseT_Full
197 | SUPPORTED_1000baseT_Half
198 | SUPPORTED_1000baseT_Full
199 | SUPPORTED_Autoneg| SUPPORTED_TP;
200
201 if (hw->chip_id == CHIP_ID_GENESIS)
202 supported &= ~(SUPPORTED_10baseT_Half
203 | SUPPORTED_10baseT_Full
204 | SUPPORTED_100baseT_Half
205 | SUPPORTED_100baseT_Full);
206
207 else if (hw->chip_id == CHIP_ID_YUKON)
208 supported &= ~SUPPORTED_1000baseT_Half;
209 } else
210 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
211 | SUPPORTED_Autoneg;
212
213 return supported;
214}
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400215
216static int skge_get_settings(struct net_device *dev,
217 struct ethtool_cmd *ecmd)
218{
219 struct skge_port *skge = netdev_priv(dev);
220 struct skge_hw *hw = skge->hw;
221
222 ecmd->transceiver = XCVR_INTERNAL;
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700223 ecmd->supported = skge_supported_modes(hw);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400224
Stephen Hemminger5e1705d2005-08-16 14:00:58 -0700225 if (hw->copper) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400226 ecmd->port = PORT_TP;
227 ecmd->phy_address = hw->phy_addr;
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700228 } else
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400229 ecmd->port = PORT_FIBRE;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400230
231 ecmd->advertising = skge->advertising;
232 ecmd->autoneg = skge->autoneg;
233 ecmd->speed = skge->speed;
234 ecmd->duplex = skge->duplex;
235 return 0;
236}
237
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400238static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
239{
240 struct skge_port *skge = netdev_priv(dev);
241 const struct skge_hw *hw = skge->hw;
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700242 u32 supported = skge_supported_modes(hw);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400243
244 if (ecmd->autoneg == AUTONEG_ENABLE) {
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700245 ecmd->advertising = supported;
246 skge->duplex = -1;
247 skge->speed = -1;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400248 } else {
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700249 u32 setting;
250
Stephen Hemminger2c668512005-07-22 16:26:07 -0700251 switch (ecmd->speed) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400252 case SPEED_1000:
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700253 if (ecmd->duplex == DUPLEX_FULL)
254 setting = SUPPORTED_1000baseT_Full;
255 else if (ecmd->duplex == DUPLEX_HALF)
256 setting = SUPPORTED_1000baseT_Half;
257 else
258 return -EINVAL;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400259 break;
260 case SPEED_100:
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700261 if (ecmd->duplex == DUPLEX_FULL)
262 setting = SUPPORTED_100baseT_Full;
263 else if (ecmd->duplex == DUPLEX_HALF)
264 setting = SUPPORTED_100baseT_Half;
265 else
266 return -EINVAL;
267 break;
268
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400269 case SPEED_10:
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700270 if (ecmd->duplex == DUPLEX_FULL)
271 setting = SUPPORTED_10baseT_Full;
272 else if (ecmd->duplex == DUPLEX_HALF)
273 setting = SUPPORTED_10baseT_Half;
274 else
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400275 return -EINVAL;
276 break;
277 default:
278 return -EINVAL;
279 }
Stephen Hemminger31b619c2005-06-27 11:33:11 -0700280
281 if ((setting & supported) == 0)
282 return -EINVAL;
283
284 skge->speed = ecmd->speed;
285 skge->duplex = ecmd->duplex;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400286 }
287
288 skge->autoneg = ecmd->autoneg;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400289 skge->advertising = ecmd->advertising;
290
291 if (netif_running(dev)) {
292 skge_down(dev);
293 skge_up(dev);
294 }
 295 return 0;
296}
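/*
 * Usage sketch (interface name is illustrative):
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 * A forced speed/duplex pair must be part of skge_supported_modes(),
 * otherwise -EINVAL is returned; the port is restarted if it was up.
 */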
297
298static void skge_get_drvinfo(struct net_device *dev,
299 struct ethtool_drvinfo *info)
300{
301 struct skge_port *skge = netdev_priv(dev);
302
303 strcpy(info->driver, DRV_NAME);
304 strcpy(info->version, DRV_VERSION);
305 strcpy(info->fw_version, "N/A");
306 strcpy(info->bus_info, pci_name(skge->hw->pdev));
307}
308
309static const struct skge_stat {
310 char name[ETH_GSTRING_LEN];
311 u16 xmac_offset;
312 u16 gma_offset;
313} skge_stats[] = {
314 { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
315 { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
316
317 { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
318 { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
319 { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
320 { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
321 { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
322 { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
323 { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
324 { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
325
326 { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
327 { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
328 { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
329 { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
330 { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
331 { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
332
333 { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
334 { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
335 { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
336 { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
337 { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
338};
339
340static int skge_get_stats_count(struct net_device *dev)
341{
342 return ARRAY_SIZE(skge_stats);
343}
344
345static void skge_get_ethtool_stats(struct net_device *dev,
346 struct ethtool_stats *stats, u64 *data)
347{
348 struct skge_port *skge = netdev_priv(dev);
349
350 if (skge->hw->chip_id == CHIP_ID_GENESIS)
351 genesis_get_stats(skge, data);
352 else
353 yukon_get_stats(skge, data);
354}
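/*
 * "ethtool -S <dev>" reports these counters in skge_stats[] order,
 * read from the XMAC (Genesis) or GMAC (Yukon) hardware MIB.
 */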
355
356/* Use hardware MIB variables for critical path statistics and
357 * transmit feedback not reported at interrupt.
358 * Other errors are accounted for in interrupt handler.
359 */
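/* Index mapping below follows skge_stats[]: 0/1 are the byte counters,
 * 2-7 the broadcast/multicast/unicast frame counts, 10 the collision
 * count and 12 the aborted-frame count.
 */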
360static struct net_device_stats *skge_get_stats(struct net_device *dev)
361{
362 struct skge_port *skge = netdev_priv(dev);
363 u64 data[ARRAY_SIZE(skge_stats)];
364
365 if (skge->hw->chip_id == CHIP_ID_GENESIS)
366 genesis_get_stats(skge, data);
367 else
368 yukon_get_stats(skge, data);
369
370 skge->net_stats.tx_bytes = data[0];
371 skge->net_stats.rx_bytes = data[1];
372 skge->net_stats.tx_packets = data[2] + data[4] + data[6];
373 skge->net_stats.rx_packets = data[3] + data[5] + data[7];
374 skge->net_stats.multicast = data[5] + data[7];
375 skge->net_stats.collisions = data[10];
376 skge->net_stats.tx_aborted_errors = data[12];
377
378 return &skge->net_stats;
379}
380
381static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
382{
383 int i;
384
Stephen Hemminger95566062005-06-27 11:33:02 -0700385 switch (stringset) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400386 case ETH_SS_STATS:
387 for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
388 memcpy(data + i * ETH_GSTRING_LEN,
389 skge_stats[i].name, ETH_GSTRING_LEN);
390 break;
391 }
392}
393
394static void skge_get_ring_param(struct net_device *dev,
395 struct ethtool_ringparam *p)
396{
397 struct skge_port *skge = netdev_priv(dev);
398
399 p->rx_max_pending = MAX_RX_RING_SIZE;
400 p->tx_max_pending = MAX_TX_RING_SIZE;
401 p->rx_mini_max_pending = 0;
402 p->rx_jumbo_max_pending = 0;
403
404 p->rx_pending = skge->rx_ring.count;
405 p->tx_pending = skge->tx_ring.count;
406 p->rx_mini_pending = 0;
407 p->rx_jumbo_pending = 0;
408}
409
410static int skge_set_ring_param(struct net_device *dev,
411 struct ethtool_ringparam *p)
412{
413 struct skge_port *skge = netdev_priv(dev);
414
415 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
416 p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
417 return -EINVAL;
418
419 skge->rx_ring.count = p->rx_pending;
420 skge->tx_ring.count = p->tx_pending;
421
422 if (netif_running(dev)) {
423 skge_down(dev);
424 skge_up(dev);
425 }
426
427 return 0;
428}
429
430static u32 skge_get_msglevel(struct net_device *netdev)
431{
432 struct skge_port *skge = netdev_priv(netdev);
433 return skge->msg_enable;
434}
435
436static void skge_set_msglevel(struct net_device *netdev, u32 value)
437{
438 struct skge_port *skge = netdev_priv(netdev);
439 skge->msg_enable = value;
440}
441
442static int skge_nway_reset(struct net_device *dev)
443{
444 struct skge_port *skge = netdev_priv(dev);
445 struct skge_hw *hw = skge->hw;
446 int port = skge->port;
447
448 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
449 return -EINVAL;
450
451 spin_lock_bh(&hw->phy_lock);
452 if (hw->chip_id == CHIP_ID_GENESIS) {
453 genesis_reset(hw, port);
454 genesis_mac_init(hw, port);
455 } else {
456 yukon_reset(hw, port);
457 yukon_init(hw, port);
458 }
459 spin_unlock_bh(&hw->phy_lock);
460 return 0;
461}
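/* "ethtool -r <dev>" ends up here; autonegotiation can only be
 * restarted when it is enabled and the interface is up.
 */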
462
463static int skge_set_sg(struct net_device *dev, u32 data)
464{
465 struct skge_port *skge = netdev_priv(dev);
466 struct skge_hw *hw = skge->hw;
467
468 if (hw->chip_id == CHIP_ID_GENESIS && data)
469 return -EOPNOTSUPP;
470 return ethtool_op_set_sg(dev, data);
471}
472
473static int skge_set_tx_csum(struct net_device *dev, u32 data)
474{
475 struct skge_port *skge = netdev_priv(dev);
476 struct skge_hw *hw = skge->hw;
477
478 if (hw->chip_id == CHIP_ID_GENESIS && data)
479 return -EOPNOTSUPP;
480
481 return ethtool_op_set_tx_csum(dev, data);
482}
483
484static u32 skge_get_rx_csum(struct net_device *dev)
485{
486 struct skge_port *skge = netdev_priv(dev);
487
488 return skge->rx_csum;
489}
490
491/* Only Yukon supports checksum offload. */
492static int skge_set_rx_csum(struct net_device *dev, u32 data)
493{
494 struct skge_port *skge = netdev_priv(dev);
495
496 if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
497 return -EOPNOTSUPP;
498
499 skge->rx_csum = data;
500 return 0;
501}
502
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400503static void skge_get_pauseparam(struct net_device *dev,
504 struct ethtool_pauseparam *ecmd)
505{
506 struct skge_port *skge = netdev_priv(dev);
507
508 ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
509 || (skge->flow_control == FLOW_MODE_SYMMETRIC);
510 ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
511 || (skge->flow_control == FLOW_MODE_SYMMETRIC);
512
513 ecmd->autoneg = skge->autoneg;
514}
515
516static int skge_set_pauseparam(struct net_device *dev,
517 struct ethtool_pauseparam *ecmd)
518{
519 struct skge_port *skge = netdev_priv(dev);
520
521 skge->autoneg = ecmd->autoneg;
522 if (ecmd->rx_pause && ecmd->tx_pause)
523 skge->flow_control = FLOW_MODE_SYMMETRIC;
Stephen Hemminger95566062005-06-27 11:33:02 -0700524 else if (ecmd->rx_pause && !ecmd->tx_pause)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400525 skge->flow_control = FLOW_MODE_REM_SEND;
Stephen Hemminger95566062005-06-27 11:33:02 -0700526 else if (!ecmd->rx_pause && ecmd->tx_pause)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400527 skge->flow_control = FLOW_MODE_LOC_SEND;
528 else
529 skge->flow_control = FLOW_MODE_NONE;
530
531 if (netif_running(dev)) {
532 skge_down(dev);
533 skge_up(dev);
534 }
535 return 0;
536}
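/*
 * Usage sketch (interface name is illustrative):
 *	ethtool -A eth0 rx on tx on	-> FLOW_MODE_SYMMETRIC
 *	ethtool -A eth0 rx on tx off	-> FLOW_MODE_REM_SEND
 * The port is restarted so the new flow control mode takes effect.
 */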
537
538/* Chip internal frequency for clock calculations */
539static inline u32 hwkhz(const struct skge_hw *hw)
540{
541 if (hw->chip_id == CHIP_ID_GENESIS)
542 return 53215; /* or: 53.125 MHz */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400543 else
544 return 78215; /* or: 78.125 MHz */
545}
546
547/* Chip hz to microseconds */
548static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
549{
550 return (ticks * 1000) / hwkhz(hw);
551}
552
553/* Microseconds to chip hz */
554static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
555{
556 return hwkhz(hw) * usec / 1000;
557}
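/*
 * Illustrative arithmetic: on Yukon (hwkhz() == 78215) a 25 usec
 * coalescing delay becomes skge_usecs2clk(hw, 25) == 78215 * 25 / 1000
 * == 1955 chip ticks; converting back, skge_clk2usec(hw, 1955) gives
 * about 24 usec because of integer truncation.
 */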
558
559static int skge_get_coalesce(struct net_device *dev,
560 struct ethtool_coalesce *ecmd)
561{
562 struct skge_port *skge = netdev_priv(dev);
563 struct skge_hw *hw = skge->hw;
564 int port = skge->port;
565
566 ecmd->rx_coalesce_usecs = 0;
567 ecmd->tx_coalesce_usecs = 0;
568
569 if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
570 u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
571 u32 msk = skge_read32(hw, B2_IRQM_MSK);
572
573 if (msk & rxirqmask[port])
574 ecmd->rx_coalesce_usecs = delay;
575 if (msk & txirqmask[port])
576 ecmd->tx_coalesce_usecs = delay;
577 }
578
579 return 0;
580}
581
582/* Note: interrupt timer is per board, but can turn on/off per port */
583static int skge_set_coalesce(struct net_device *dev,
584 struct ethtool_coalesce *ecmd)
585{
586 struct skge_port *skge = netdev_priv(dev);
587 struct skge_hw *hw = skge->hw;
588 int port = skge->port;
589 u32 msk = skge_read32(hw, B2_IRQM_MSK);
590 u32 delay = 25;
591
592 if (ecmd->rx_coalesce_usecs == 0)
593 msk &= ~rxirqmask[port];
594 else if (ecmd->rx_coalesce_usecs < 25 ||
595 ecmd->rx_coalesce_usecs > 33333)
596 return -EINVAL;
597 else {
598 msk |= rxirqmask[port];
599 delay = ecmd->rx_coalesce_usecs;
600 }
601
602 if (ecmd->tx_coalesce_usecs == 0)
603 msk &= ~txirqmask[port];
604 else if (ecmd->tx_coalesce_usecs < 25 ||
605 ecmd->tx_coalesce_usecs > 33333)
606 return -EINVAL;
607 else {
608 msk |= txirqmask[port];
609 delay = min(delay, ecmd->rx_coalesce_usecs);
610 }
611
612 skge_write32(hw, B2_IRQM_MSK, msk);
613 if (msk == 0)
614 skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
615 else {
616 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
617 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
618 }
619 return 0;
620}
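/*
 * Usage sketch (interface name is illustrative):
 *	ethtool -C eth0 rx-usecs 50 tx-usecs 50
 * Non-zero values must fall within 25..33333 usec; 0 masks that
 * direction off, and if both are 0 the moderation timer is stopped.
 */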
621
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700622enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
623static void skge_led(struct skge_port *skge, enum led_mode mode)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400624{
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400625 struct skge_hw *hw = skge->hw;
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700626 int port = skge->port;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400627
Stephen Hemminger4ff6ac02005-07-22 16:26:05 -0700628 spin_lock_bh(&hw->phy_lock);
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700629 if (hw->chip_id == CHIP_ID_GENESIS) {
630 switch (mode) {
631 case LED_MODE_OFF:
632 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
633 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
634 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
635 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
636 break;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400637
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700638 case LED_MODE_ON:
639 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
640 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
641
642 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
643 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
644
645 break;
646
647 case LED_MODE_TST:
648 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
649 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
650 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
651
652 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
653 break;
654 }
655 } else {
656 switch (mode) {
657 case LED_MODE_OFF:
658 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
659 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
660 PHY_M_LED_MO_DUP(MO_LED_OFF) |
661 PHY_M_LED_MO_10(MO_LED_OFF) |
662 PHY_M_LED_MO_100(MO_LED_OFF) |
663 PHY_M_LED_MO_1000(MO_LED_OFF) |
664 PHY_M_LED_MO_RX(MO_LED_OFF));
665 break;
666 case LED_MODE_ON:
667 gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
668 PHY_M_LED_PULS_DUR(PULS_170MS) |
669 PHY_M_LED_BLINK_RT(BLINK_84MS) |
670 PHY_M_LEDC_TX_CTRL |
671 PHY_M_LEDC_DP_CTRL);
672
673 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
674 PHY_M_LED_MO_RX(MO_LED_OFF) |
675 (skge->speed == SPEED_100 ?
676 PHY_M_LED_MO_100(MO_LED_ON) : 0));
677 break;
678 case LED_MODE_TST:
679 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
680 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
681 PHY_M_LED_MO_DUP(MO_LED_ON) |
682 PHY_M_LED_MO_10(MO_LED_ON) |
683 PHY_M_LED_MO_100(MO_LED_ON) |
684 PHY_M_LED_MO_1000(MO_LED_ON) |
685 PHY_M_LED_MO_RX(MO_LED_ON));
686 }
687 }
688 spin_unlock_bh(&hw->phy_lock);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400689}
690
 691/* blink LEDs for finding the board */
692static int skge_phys_id(struct net_device *dev, u32 data)
693{
694 struct skge_port *skge = netdev_priv(dev);
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700695 unsigned long ms;
696 enum led_mode mode = LED_MODE_TST;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400697
Stephen Hemminger95566062005-06-27 11:33:02 -0700698 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700699 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
700 else
701 ms = data * 1000;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400702
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700703 while (ms > 0) {
704 skge_led(skge, mode);
705 mode ^= LED_MODE_TST;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400706
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700707 if (msleep_interruptible(BLINK_MS))
708 break;
709 ms -= BLINK_MS;
710 }
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400711
Stephen Hemminger6abebb52005-07-22 16:26:10 -0700712 /* back to regular LED state */
713 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400714
715 return 0;
716}
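/* "ethtool -p <dev> 5" blinks the port LEDs in test mode for 5
 * seconds, then restores the normal on/off state for the link.
 */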
717
718static struct ethtool_ops skge_ethtool_ops = {
719 .get_settings = skge_get_settings,
720 .set_settings = skge_set_settings,
721 .get_drvinfo = skge_get_drvinfo,
722 .get_regs_len = skge_get_regs_len,
723 .get_regs = skge_get_regs,
724 .get_wol = skge_get_wol,
725 .set_wol = skge_set_wol,
726 .get_msglevel = skge_get_msglevel,
727 .set_msglevel = skge_set_msglevel,
728 .nway_reset = skge_nway_reset,
729 .get_link = ethtool_op_get_link,
730 .get_ringparam = skge_get_ring_param,
731 .set_ringparam = skge_set_ring_param,
732 .get_pauseparam = skge_get_pauseparam,
733 .set_pauseparam = skge_set_pauseparam,
734 .get_coalesce = skge_get_coalesce,
735 .set_coalesce = skge_set_coalesce,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400736 .get_sg = ethtool_op_get_sg,
737 .set_sg = skge_set_sg,
738 .get_tx_csum = ethtool_op_get_tx_csum,
739 .set_tx_csum = skge_set_tx_csum,
740 .get_rx_csum = skge_get_rx_csum,
741 .set_rx_csum = skge_set_rx_csum,
742 .get_strings = skge_get_strings,
743 .phys_id = skge_phys_id,
744 .get_stats_count = skge_get_stats_count,
745 .get_ethtool_stats = skge_get_ethtool_stats,
746};
747
748/*
749 * Allocate ring elements and chain them together
750 * One-to-one association of board descriptors with ring elements
751 */
752static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
753{
754 struct skge_tx_desc *d;
755 struct skge_element *e;
756 int i;
757
758 ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
759 if (!ring->start)
760 return -ENOMEM;
761
762 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
763 e->desc = d;
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700764 e->skb = NULL;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400765 if (i == ring->count - 1) {
766 e->next = ring->start;
767 d->next_offset = base;
768 } else {
769 e->next = e + 1;
770 d->next_offset = base + (i+1) * sizeof(*d);
771 }
772 }
773 ring->to_use = ring->to_clean = ring->start;
774
775 return 0;
776}
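/*
 * Resulting layout (sketch): element i's descriptor lives at
 * vaddr + i * sizeof(desc) and its next_offset points at
 * base + (i+1) * sizeof(desc); the last descriptor wraps back to base,
 * so both the software elements and the hardware list form a ring.
 */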
777
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700778static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400779{
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700780 struct sk_buff *skb = dev_alloc_skb(size);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400781
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700782 if (likely(skb)) {
783 skb->dev = dev;
784 skb_reserve(skb, NET_IP_ALIGN);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400785 }
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700786 return skb;
787}
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400788
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700789/* Allocate and setup a new buffer for receiving */
790static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
791 struct sk_buff *skb, unsigned int bufsize)
792{
793 struct skge_rx_desc *rd = e->desc;
794 u64 map;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400795
796 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
797 PCI_DMA_FROMDEVICE);
798
799 rd->dma_lo = map;
800 rd->dma_hi = map >> 32;
801 e->skb = skb;
802 rd->csum1_start = ETH_HLEN;
803 rd->csum2_start = ETH_HLEN;
804 rd->csum1 = 0;
805 rd->csum2 = 0;
806
807 wmb();
808
809 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
810 pci_unmap_addr_set(e, mapaddr, map);
811 pci_unmap_len_set(e, maplen, bufsize);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400812}
813
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700814/* Resume receiving using existing skb,
815 * Note: DMA address is not changed by chip.
816 * MTU not changed while receiver active.
817 */
818static void skge_rx_reuse(struct skge_element *e, unsigned int size)
819{
820 struct skge_rx_desc *rd = e->desc;
821
822 rd->csum2 = 0;
823 rd->csum2_start = ETH_HLEN;
824
825 wmb();
826
827 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
828}
829
830
831/* Free all buffers in receive ring, assumes receiver stopped */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400832static void skge_rx_clean(struct skge_port *skge)
833{
834 struct skge_hw *hw = skge->hw;
835 struct skge_ring *ring = &skge->rx_ring;
836 struct skge_element *e;
837
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700838 e = ring->start;
839 do {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400840 struct skge_rx_desc *rd = e->desc;
841 rd->control = 0;
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700842 if (e->skb) {
843 pci_unmap_single(hw->pdev,
844 pci_unmap_addr(e, mapaddr),
845 pci_unmap_len(e, maplen),
846 PCI_DMA_FROMDEVICE);
847 dev_kfree_skb(e->skb);
848 e->skb = NULL;
849 }
850 } while ((e = e->next) != ring->start);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400851}
852
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700853
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400854/* Allocate buffers for receive ring
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700855 * For receive: to_clean is next received frame.
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400856 */
857static int skge_rx_fill(struct skge_port *skge)
858{
859 struct skge_ring *ring = &skge->rx_ring;
860 struct skge_element *e;
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700861 unsigned int bufsize = skge->rx_buf_size;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400862
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700863 e = ring->start;
864 do {
865 struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400866
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700867 if (!skb)
868 return -ENOMEM;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400869
Stephen Hemminger19a33d42005-06-27 11:33:15 -0700870 skge_rx_setup(skge, e, skb, bufsize);
 871 } while ((e = e->next) != ring->start);
872
873 ring->to_clean = ring->start;
874 return 0;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400875}
876
877static void skge_link_up(struct skge_port *skge)
878{
Stephen Hemminger54cfb5a2005-08-16 14:01:05 -0700879 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
880 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
881
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400882 netif_carrier_on(skge->netdev);
883 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
884 netif_wake_queue(skge->netdev);
885
886 if (netif_msg_link(skge))
887 printk(KERN_INFO PFX
888 "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
889 skge->netdev->name, skge->speed,
890 skge->duplex == DUPLEX_FULL ? "full" : "half",
891 (skge->flow_control == FLOW_MODE_NONE) ? "none" :
892 (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
893 (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
894 (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
895 "unknown");
896}
897
898static void skge_link_down(struct skge_port *skge)
899{
Stephen Hemminger54cfb5a2005-08-16 14:01:05 -0700900 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400901 netif_carrier_off(skge->netdev);
902 netif_stop_queue(skge->netdev);
903
904 if (netif_msg_link(skge))
905 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
906}
907
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700908static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400909{
910 int i;
911 u16 v;
912
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700913 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
914 v = xm_read16(hw, port, XM_PHY_DATA);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400915
Stephen Hemminger89bf5f22005-06-27 11:33:10 -0700916 /* Need to wait for external PHY */
917 for (i = 0; i < PHY_RETRIES; i++) {
918 udelay(1);
919 if (xm_read16(hw, port, XM_MMU_CMD)
920 & XM_MMU_PHY_RDY)
921 goto ready;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400922 }
923
Stephen Hemminger89bf5f22005-06-27 11:33:10 -0700924 printk(KERN_WARNING PFX "%s: phy read timed out\n",
925 hw->dev[port]->name);
926 return 0;
927 ready:
928 v = xm_read16(hw, port, XM_PHY_DATA);
929
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400930 return v;
931}
932
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700933static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400934{
935 int i;
936
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700937 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400938 for (i = 0; i < PHY_RETRIES; i++) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700939 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400940 goto ready;
Stephen Hemminger89bf5f22005-06-27 11:33:10 -0700941 udelay(1);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400942 }
943 printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
944 hw->dev[port]->name);
945
946
947 ready:
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700948 xm_write16(hw, port, XM_PHY_DATA, val);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400949 for (i = 0; i < PHY_RETRIES; i++) {
950 udelay(1);
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700951 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400952 return;
953 }
954 printk(KERN_WARNING PFX "%s: phy write timed out\n",
955 hw->dev[port]->name);
956}
957
958static void genesis_init(struct skge_hw *hw)
959{
960 /* set blink source counter */
961 skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
962 skge_write8(hw, B2_BSC_CTRL, BSC_START);
963
964 /* configure mac arbiter */
965 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
966
967 /* configure mac arbiter timeout values */
968 skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
969 skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
970 skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
971 skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
972
973 skge_write8(hw, B3_MA_RCINI_RX1, 0);
974 skge_write8(hw, B3_MA_RCINI_RX2, 0);
975 skge_write8(hw, B3_MA_RCINI_TX1, 0);
976 skge_write8(hw, B3_MA_RCINI_TX2, 0);
977
978 /* configure packet arbiter timeout */
979 skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
980 skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
981 skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
982 skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
983 skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
984}
985
986static void genesis_reset(struct skge_hw *hw, int port)
987{
Stephen Hemminger45bada62005-06-27 11:33:12 -0700988 const u8 zero[8] = { 0 };
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400989
990 /* reset the statistics module */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -0700991 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
992 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
993 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
994 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
995 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400996
Stephen Hemminger89bf5f22005-06-27 11:33:10 -0700997 /* disable Broadcom PHY IRQ */
998 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -0400999
Stephen Hemminger45bada62005-06-27 11:33:12 -07001000 xm_outhash(hw, port, XM_HSM, zero);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001001}
1002
1003
Stephen Hemminger45bada62005-06-27 11:33:12 -07001004/* Convert mode to MII values */
1005static const u16 phy_pause_map[] = {
1006 [FLOW_MODE_NONE] = 0,
1007 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
1008 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
1009 [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
1010};
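/*
 * The table above maps the driver's flow control setting onto the PHY
 * autonegotiation advertisement: symmetric advertises the pause
 * capability, local-send only the asymmetric bit, and remote-send both,
 * following the 802.3 pause resolution rules.
 */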
1011
1012
1013/* Check status of Broadcom phy link */
1014static void bcom_check_link(struct skge_hw *hw, int port)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001015{
Stephen Hemminger45bada62005-06-27 11:33:12 -07001016 struct net_device *dev = hw->dev[port];
1017 struct skge_port *skge = netdev_priv(dev);
1018 u16 status;
1019
1020 /* read twice because of latch */
1021 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
1022 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1023
1024 pr_debug("bcom_check_link status=0x%x\n", status);
1025
1026 if ((status & PHY_ST_LSYNC) == 0) {
1027 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
1028 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1029 xm_write16(hw, port, XM_MMU_CMD, cmd);
1030 /* dummy read to ensure writing */
1031 (void) xm_read16(hw, port, XM_MMU_CMD);
1032
1033 if (netif_carrier_ok(dev))
1034 skge_link_down(skge);
1035 } else {
1036 if (skge->autoneg == AUTONEG_ENABLE &&
1037 (status & PHY_ST_AN_OVER)) {
1038 u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
1039 u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1040
1041 if (lpa & PHY_B_AN_RF) {
1042 printk(KERN_NOTICE PFX "%s: remote fault\n",
1043 dev->name);
1044 return;
1045 }
1046
1047 /* Check Duplex mismatch */
Stephen Hemminger2c668512005-07-22 16:26:07 -07001048 switch (aux & PHY_B_AS_AN_RES_MSK) {
Stephen Hemminger45bada62005-06-27 11:33:12 -07001049 case PHY_B_RES_1000FD:
1050 skge->duplex = DUPLEX_FULL;
1051 break;
1052 case PHY_B_RES_1000HD:
1053 skge->duplex = DUPLEX_HALF;
1054 break;
1055 default:
1056 printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
1057 dev->name);
1058 return;
1059 }
1060
1061
1062 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1063 switch (aux & PHY_B_AS_PAUSE_MSK) {
1064 case PHY_B_AS_PAUSE_MSK:
1065 skge->flow_control = FLOW_MODE_SYMMETRIC;
1066 break;
1067 case PHY_B_AS_PRR:
1068 skge->flow_control = FLOW_MODE_REM_SEND;
1069 break;
1070 case PHY_B_AS_PRT:
1071 skge->flow_control = FLOW_MODE_LOC_SEND;
1072 break;
1073 default:
1074 skge->flow_control = FLOW_MODE_NONE;
1075 }
1076
1077 skge->speed = SPEED_1000;
1078 }
1079
1080 if (!netif_carrier_ok(dev))
1081 genesis_link_up(skge);
1082 }
1083}
1084
 1085/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
 1086 * PHY on for 100 or 10 Mbit operation
1087 */
1088static void bcom_phy_init(struct skge_port *skge, int jumbo)
1089{
1090 struct skge_hw *hw = skge->hw;
1091 int port = skge->port;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001092 int i;
Stephen Hemminger45bada62005-06-27 11:33:12 -07001093 u16 id1, r, ext, ctl;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001094
1095 /* magic workaround patterns for Broadcom */
1096 static const struct {
1097 u16 reg;
1098 u16 val;
1099 } A1hack[] = {
1100 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
1101 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
1102 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
1103 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1104 }, C0hack[] = {
1105 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
1106 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1107 };
1108
Stephen Hemminger45bada62005-06-27 11:33:12 -07001109 pr_debug("bcom_phy_init\n");
1110
1111 /* read Id from external PHY (all have the same address) */
1112 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1113
1114 /* Optimize MDIO transfer by suppressing preamble. */
1115 r = xm_read16(hw, port, XM_MMU_CMD);
1116 r |= XM_MMU_NO_PRE;
 1117 xm_write16(hw, port, XM_MMU_CMD, r);
1118
Stephen Hemminger2c668512005-07-22 16:26:07 -07001119 switch (id1) {
Stephen Hemminger45bada62005-06-27 11:33:12 -07001120 case PHY_BCOM_ID1_C0:
1121 /*
1122 * Workaround BCOM Errata for the C0 type.
1123 * Write magic patterns to reserved registers.
1124 */
1125 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1126 xm_phy_write(hw, port,
1127 C0hack[i].reg, C0hack[i].val);
1128
1129 break;
1130 case PHY_BCOM_ID1_A1:
1131 /*
1132 * Workaround BCOM Errata for the A1 type.
1133 * Write magic patterns to reserved registers.
1134 */
1135 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1136 xm_phy_write(hw, port,
1137 A1hack[i].reg, A1hack[i].val);
1138 break;
1139 }
1140
1141 /*
1142 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1143 * Disable Power Management after reset.
1144 */
1145 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1146 r |= PHY_B_AC_DIS_PM;
1147 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1148
1149 /* Dummy read */
1150 xm_read16(hw, port, XM_ISRC);
1151
1152 ext = PHY_B_PEC_EN_LTR; /* enable tx led */
1153 ctl = PHY_CT_SP1000; /* always 1000mbit */
1154
1155 if (skge->autoneg == AUTONEG_ENABLE) {
1156 /*
1157 * Workaround BCOM Errata #1 for the C5 type.
1158 * 1000Base-T Link Acquisition Failure in Slave Mode
1159 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1160 */
1161 u16 adv = PHY_B_1000C_RD;
1162 if (skge->advertising & ADVERTISED_1000baseT_Half)
1163 adv |= PHY_B_1000C_AHD;
1164 if (skge->advertising & ADVERTISED_1000baseT_Full)
1165 adv |= PHY_B_1000C_AFD;
1166 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1167
1168 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1169 } else {
1170 if (skge->duplex == DUPLEX_FULL)
1171 ctl |= PHY_CT_DUP_MD;
1172 /* Force to slave */
1173 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1174 }
1175
1176 /* Set autonegotiation pause parameters */
1177 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1178 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1179
1180 /* Handle Jumbo frames */
1181 if (jumbo) {
1182 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1183 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
1184
1185 ext |= PHY_B_PEC_HIGH_LA;
1186
1187 }
1188
1189 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1190 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1191
 1192 /* Use link status change interrupt */
1193 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1194
1195 bcom_check_link(hw, port);
1196}
1197
1198static void genesis_mac_init(struct skge_hw *hw, int port)
1199{
1200 struct net_device *dev = hw->dev[port];
1201 struct skge_port *skge = netdev_priv(dev);
1202 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1203 int i;
1204 u32 r;
1205 const u8 zero[6] = { 0 };
1206
1207 /* Clear MIB counters */
1208 xm_write16(hw, port, XM_STAT_CMD,
1209 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1210 /* Clear two times according to Errata #3 */
1211 xm_write16(hw, port, XM_STAT_CMD,
1212 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001213
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001214 /* Unreset the XMAC. */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001215 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001216
1217 /*
1218 * Perform additional initialization for external PHYs,
1219 * namely for the 1000baseTX cards that use the XMAC's
1220 * GMII mode.
1221 */
Stephen Hemminger45bada62005-06-27 11:33:12 -07001222 /* Take external Phy out of reset */
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001223 r = skge_read32(hw, B2_GP_IO);
1224 if (port == 0)
1225 r |= GP_DIR_0|GP_IO_0;
1226 else
1227 r |= GP_DIR_2|GP_IO_2;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001228
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001229 skge_write32(hw, B2_GP_IO, r);
1230 skge_read32(hw, B2_GP_IO);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001231
Stephen Hemminger45bada62005-06-27 11:33:12 -07001232 /* Enable GMII interface */
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001233 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001234
Stephen Hemminger45bada62005-06-27 11:33:12 -07001235 bcom_phy_init(skge, jumbo);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001236
Stephen Hemminger45bada62005-06-27 11:33:12 -07001237 /* Set Station Address */
1238 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001239
Stephen Hemminger45bada62005-06-27 11:33:12 -07001240 /* We don't use match addresses so clear */
1241 for (i = 1; i < 16; i++)
1242 xm_outaddr(hw, port, XM_EXM(i), zero);
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001243
Stephen Hemminger45bada62005-06-27 11:33:12 -07001244 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1245 xm_write16(hw, port, XM_RX_HI_WM, 1450);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001246
1247 /* We don't need the FCS appended to the packet. */
Stephen Hemminger45bada62005-06-27 11:33:12 -07001248 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1249 if (jumbo)
1250 r |= XM_RX_BIG_PK_OK;
1251
1252 if (skge->duplex == DUPLEX_HALF) {
1253 /*
1254 * If in manual half duplex mode the other side might be in
1255 * full duplex mode, so ignore if a carrier extension is not seen
1256 * on frames received
1257 */
1258 r |= XM_RX_DIS_CEXT;
1259 }
1260 xm_write16(hw, port, XM_RX_CMD, r);
1261
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001262
1263 /* We want short frames padded to 60 bytes. */
Stephen Hemminger45bada62005-06-27 11:33:12 -07001264 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1265
1266 /*
1267 * Bump up the transmit threshold. This helps hold off transmit
1268 * underruns when we're blasting traffic from both ports at once.
1269 */
1270 xm_write16(hw, port, XM_TX_THR, 512);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001271
1272 /*
 1273 * Enable the reception of all error frames. This is
1274 * a necessary evil due to the design of the XMAC. The
1275 * XMAC's receive FIFO is only 8K in size, however jumbo
1276 * frames can be up to 9000 bytes in length. When bad
1277 * frame filtering is enabled, the XMAC's RX FIFO operates
1278 * in 'store and forward' mode. For this to work, the
1279 * entire frame has to fit into the FIFO, but that means
1280 * that jumbo frames larger than 8192 bytes will be
1281 * truncated. Disabling all bad frame filtering causes
1282 * the RX FIFO to operate in streaming mode, in which
 1283 * case the XMAC will start transferring frames out of the
1284 * RX FIFO as soon as the FIFO threshold is reached.
1285 */
Stephen Hemminger45bada62005-06-27 11:33:12 -07001286 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001287
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001288
1289 /*
Stephen Hemminger45bada62005-06-27 11:33:12 -07001290 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1291 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1292 * and 'Octets Rx OK Hi Cnt Ov'.
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001293 */
Stephen Hemminger45bada62005-06-27 11:33:12 -07001294 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1295
1296 /*
1297 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1298 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1299 * and 'Octets Tx OK Hi Cnt Ov'.
1300 */
1301 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001302
1303 /* Configure MAC arbiter */
1304 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
1305
1306 /* configure timeout values */
1307 skge_write8(hw, B3_MA_TOINI_RX1, 72);
1308 skge_write8(hw, B3_MA_TOINI_RX2, 72);
1309 skge_write8(hw, B3_MA_TOINI_TX1, 72);
1310 skge_write8(hw, B3_MA_TOINI_TX2, 72);
1311
1312 skge_write8(hw, B3_MA_RCINI_RX1, 0);
1313 skge_write8(hw, B3_MA_RCINI_RX2, 0);
1314 skge_write8(hw, B3_MA_RCINI_TX1, 0);
1315 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1316
1317 /* Configure Rx MAC FIFO */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001318 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1319 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1320 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001321
1322 /* Configure Tx MAC FIFO */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001323 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1324 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1325 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001326
Stephen Hemminger45bada62005-06-27 11:33:12 -07001327 if (jumbo) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001328 /* Enable frame flushing if jumbo frames used */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001329 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001330 } else {
1331 /* enable timeout timers if normal frames */
1332 skge_write16(hw, B3_PA_CTRL,
Stephen Hemminger45bada62005-06-27 11:33:12 -07001333 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001334 }
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001335}
1336
1337static void genesis_stop(struct skge_port *skge)
1338{
1339 struct skge_hw *hw = skge->hw;
1340 int port = skge->port;
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001341 u32 reg;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001342
1343 /* Clear Tx packet arbiter timeout IRQ */
1344 skge_write16(hw, B3_PA_CTRL,
1345 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
1346
1347 /*
 1348 * If the transfer gets stuck at the MAC, the STOP command will not
 1349 * terminate unless we flush the XMAC's transmit FIFO!
1350 */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001351 xm_write32(hw, port, XM_MODE,
1352 xm_read32(hw, port, XM_MODE)|XM_MD_FTF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001353
1354
1355 /* Reset the MAC */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001356 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001357
1358 /* For external PHYs there must be special handling */
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001359 reg = skge_read32(hw, B2_GP_IO);
1360 if (port == 0) {
1361 reg |= GP_DIR_0;
1362 reg &= ~GP_IO_0;
1363 } else {
1364 reg |= GP_DIR_2;
1365 reg &= ~GP_IO_2;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001366 }
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001367 skge_write32(hw, B2_GP_IO, reg);
1368 skge_read32(hw, B2_GP_IO);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001369
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001370 xm_write16(hw, port, XM_MMU_CMD,
1371 xm_read16(hw, port, XM_MMU_CMD)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001372 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1373
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001374 xm_read16(hw, port, XM_MMU_CMD);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001375}
1376
1377
1378static void genesis_get_stats(struct skge_port *skge, u64 *data)
1379{
1380 struct skge_hw *hw = skge->hw;
1381 int port = skge->port;
1382 int i;
1383 unsigned long timeout = jiffies + HZ;
1384
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001385 xm_write16(hw, port,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001386 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1387
1388 /* wait for update to complete */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001389 while (xm_read16(hw, port, XM_STAT_CMD)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001390 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1391 if (time_after(jiffies, timeout))
1392 break;
1393 udelay(10);
1394 }
1395
1396 /* special case for 64 bit octet counter */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001397 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1398 | xm_read32(hw, port, XM_TXO_OK_LO);
1399 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1400 | xm_read32(hw, port, XM_RXO_OK_LO);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001401
1402 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001403 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001404}
1405
1406static void genesis_mac_intr(struct skge_hw *hw, int port)
1407{
1408 struct skge_port *skge = netdev_priv(hw->dev[port]);
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001409 u16 status = xm_read16(hw, port, XM_ISRC);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001410
Stephen Hemminger7e676d92005-06-27 11:33:13 -07001411 if (netif_msg_intr(skge))
1412 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1413 skge->netdev->name, status);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001414
1415 if (status & XM_IS_TXF_UR) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001416 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001417 ++skge->net_stats.tx_fifo_errors;
1418 }
1419 if (status & XM_IS_RXF_OV) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001420 xm_write32(hw, port, XM_MODE, XM_MD_FRF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001421 ++skge->net_stats.rx_fifo_errors;
1422 }
1423}
1424
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001425static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001426{
1427 int i;
1428
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001429 gma_write16(hw, port, GM_SMI_DATA, val);
1430 gma_write16(hw, port, GM_SMI_CTRL,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001431 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1432 for (i = 0; i < PHY_RETRIES; i++) {
1433 udelay(1);
1434
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001435 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001436 break;
1437 }
1438}
1439
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001440static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001441{
1442 int i;
1443
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001444 gma_write16(hw, port, GM_SMI_CTRL,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001445 GM_SMI_CT_PHY_AD(hw->phy_addr)
1446 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1447
1448 for (i = 0; i < PHY_RETRIES; i++) {
1449 udelay(1);
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001450 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001451 goto ready;
1452 }
1453
1454 printk(KERN_WARNING PFX "%s: phy read timeout\n",
1455 hw->dev[port]->name);
1456 return 0;
1457 ready:
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001458 return gma_read16(hw, port, GM_SMI_DATA);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001459}
1460
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001461static void genesis_link_up(struct skge_port *skge)
1462{
1463 struct skge_hw *hw = skge->hw;
1464 int port = skge->port;
1465 u16 cmd;
1466 u32 mode, msk;
1467
1468 pr_debug("genesis_link_up\n");
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001469 cmd = xm_read16(hw, port, XM_MMU_CMD);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001470
1471 /*
1472 * enabling pause frame reception is required for 1000BT
 1473 * because the XMAC is not reset when the link goes down
1474 */
1475 if (skge->flow_control == FLOW_MODE_NONE ||
1476 skge->flow_control == FLOW_MODE_LOC_SEND)
Stephen Hemminger7e676d92005-06-27 11:33:13 -07001477 /* Disable Pause Frame Reception */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001478 cmd |= XM_MMU_IGN_PF;
1479 else
1480 /* Enable Pause Frame Reception */
1481 cmd &= ~XM_MMU_IGN_PF;
1482
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001483 xm_write16(hw, port, XM_MMU_CMD, cmd);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001484
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001485 mode = xm_read32(hw, port, XM_MODE);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001486 if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
1487 skge->flow_control == FLOW_MODE_LOC_SEND) {
1488 /*
1489 * Configure Pause Frame Generation
1490 * Use internal and external Pause Frame Generation.
1491 * Sending pause frames is edge triggered.
1492 * Send a Pause frame with the maximum pause time if
1493 * an internal or external FIFO full condition occurs.
1494 * Send a zero pause time frame to re-start transmission.
1495 */
1496 /* XM_PAUSE_DA = '010000C28001' (default) */
1497 /* XM_MAC_PTIME = 0xffff (maximum) */
1498 /* remember this value is defined in big endian (!) */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001499 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001500
1501 mode |= XM_PAUSE_MODE;
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001502 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001503 } else {
1504 /*
1505 * disabling pause frame generation is required for 1000BT
1506 * because the XMAC is not reset when the link goes down
1507 */
1508 /* Disable Pause Mode in Mode Register */
1509 mode &= ~XM_PAUSE_MODE;
1510
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001511 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001512 }
1513
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001514 xm_write32(hw, port, XM_MODE, mode);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001515
1516 msk = XM_DEF_MSK;
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001517 /* disable GP0 interrupt bit for external Phy */
1518 msk |= XM_IS_INP_ASS;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001519
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001520 xm_write16(hw, port, XM_IMSK, msk);
1521 xm_read16(hw, port, XM_ISRC);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001522
1523 /* get MMU Command Reg. */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001524 cmd = xm_read16(hw, port, XM_MMU_CMD);
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001525 if (skge->duplex == DUPLEX_FULL)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001526 cmd |= XM_MMU_GMII_FD;
1527
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07001528 /*
1529 * Workaround BCOM Errata (#10523) for all BCom Phys
1530 * Enable Power Management after link up
1531 */
1532 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1533 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1534 & ~PHY_B_AC_DIS_PM);
1535 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001536
1537 /* enable Rx/Tx */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001538 xm_write16(hw, port, XM_MMU_CMD,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001539 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1540 skge_link_up(skge);
1541}
1542
1543
Stephen Hemminger45bada62005-06-27 11:33:12 -07001544static inline void bcom_phy_intr(struct skge_port *skge)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001545{
1546 struct skge_hw *hw = skge->hw;
1547 int port = skge->port;
Stephen Hemminger45bada62005-06-27 11:33:12 -07001548 u16 isrc;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001549
Stephen Hemminger45bada62005-06-27 11:33:12 -07001550 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
Stephen Hemminger7e676d92005-06-27 11:33:13 -07001551 if (netif_msg_intr(skge))
1552 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
1553 skge->netdev->name, isrc);
Stephen Hemminger45bada62005-06-27 11:33:12 -07001554
1555 if (isrc & PHY_B_IS_PSE)
1556 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
1557 hw->dev[port]->name);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001558
1559 /* Workaround BCom Errata:
1560 * enable and disable loopback mode if "NO HCD" occurs.
1561 */
Stephen Hemminger45bada62005-06-27 11:33:12 -07001562 if (isrc & PHY_B_IS_NO_HDCL) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001563 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1564 xm_phy_write(hw, port, PHY_BCOM_CTRL,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001565 ctrl | PHY_CT_LOOP);
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001566 xm_phy_write(hw, port, PHY_BCOM_CTRL,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001567 ctrl & ~PHY_CT_LOOP);
1568 }
1569
Stephen Hemminger45bada62005-06-27 11:33:12 -07001570 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1571 bcom_check_link(hw, port);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001572
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001573}
1574
1575/* Marvell Phy Initialization */
1576static void yukon_init(struct skge_hw *hw, int port)
1577{
1578 struct skge_port *skge = netdev_priv(hw->dev[port]);
1579 u16 ctrl, ct1000, adv;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001580
1581 pr_debug("yukon_init\n");
1582 if (skge->autoneg == AUTONEG_ENABLE) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001583 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001584
1585 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1586 PHY_M_EC_MAC_S_MSK);
1587 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1588
Stephen Hemmingerc506a502005-06-27 11:33:09 -07001589 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001590
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001591 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001592 }
1593
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001594 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001595 if (skge->autoneg == AUTONEG_DISABLE)
1596 ctrl &= ~PHY_CT_ANE;
1597
1598 ctrl |= PHY_CT_RESET;
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001599 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001600
1601 ctrl = 0;
1602 ct1000 = 0;
Stephen Hemmingerb18f2092005-06-27 11:33:08 -07001603 adv = PHY_AN_CSMA;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001604
1605 if (skge->autoneg == AUTONEG_ENABLE) {
Stephen Hemminger5e1705d2005-08-16 14:00:58 -07001606 if (hw->copper) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001607 if (skge->advertising & ADVERTISED_1000baseT_Full)
1608 ct1000 |= PHY_M_1000C_AFD;
1609 if (skge->advertising & ADVERTISED_1000baseT_Half)
1610 ct1000 |= PHY_M_1000C_AHD;
1611 if (skge->advertising & ADVERTISED_100baseT_Full)
1612 adv |= PHY_M_AN_100_FD;
1613 if (skge->advertising & ADVERTISED_100baseT_Half)
1614 adv |= PHY_M_AN_100_HD;
1615 if (skge->advertising & ADVERTISED_10baseT_Full)
1616 adv |= PHY_M_AN_10_FD;
1617 if (skge->advertising & ADVERTISED_10baseT_Half)
1618 adv |= PHY_M_AN_10_HD;
Stephen Hemminger45bada62005-06-27 11:33:12 -07001619 } else /* special defines for FIBER (88E1011S only) */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001620 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1621
Stephen Hemminger45bada62005-06-27 11:33:12 -07001622 /* Set Flow-control capabilities */
1623 adv |= phy_pause_map[skge->flow_control];
1624
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001625 /* Restart Auto-negotiation */
1626 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1627 } else {
1628 /* forced speed/duplex settings */
1629 ct1000 = PHY_M_1000C_MSE;
1630
1631 if (skge->duplex == DUPLEX_FULL)
1632 ctrl |= PHY_CT_DUP_MD;
1633
1634 switch (skge->speed) {
1635 case SPEED_1000:
1636 ctrl |= PHY_CT_SP1000;
1637 break;
1638 case SPEED_100:
1639 ctrl |= PHY_CT_SP100;
1640 break;
1641 }
1642
1643 ctrl |= PHY_CT_RESET;
1644 }
1645
Stephen Hemmingerc506a502005-06-27 11:33:09 -07001646 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001647
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001648 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
1649 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001650
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001651 /* Enable phy interrupt on autonegotiation complete (or link up) */
1652 if (skge->autoneg == AUTONEG_ENABLE)
Stephen Hemminger4cde06e2005-07-22 16:26:09 -07001653 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001654 else
Stephen Hemminger4cde06e2005-07-22 16:26:09 -07001655 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001656}
1657
1658static void yukon_reset(struct skge_hw *hw, int port)
1659{
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001660 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1661 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1662 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1663 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1664 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001665
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001666 gma_write16(hw, port, GM_RX_CTRL,
1667 gma_read16(hw, port, GM_RX_CTRL)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001668 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1669}
1670
1671static void yukon_mac_init(struct skge_hw *hw, int port)
1672{
1673 struct skge_port *skge = netdev_priv(hw->dev[port]);
1674 int i;
1675 u32 reg;
1676 const u8 *addr = hw->dev[port]->dev_addr;
1677
1678 /* WA code for COMA mode -- set PHY reset */
1679 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
Stephen Hemminger38231712005-07-22 16:26:06 -07001680 hw->chip_rev >= CHIP_REV_YU_LITE_A3)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001681 skge_write32(hw, B2_GP_IO,
1682 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
1683
1684 /* hard reset */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001685 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1686 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001687
1688 /* WA code for COMA mode -- clear PHY reset */
1689 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
Stephen Hemminger38231712005-07-22 16:26:06 -07001690 hw->chip_rev >= CHIP_REV_YU_LITE_A3)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001691 skge_write32(hw, B2_GP_IO,
1692 (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
1693 & ~GP_IO_9);
1694
1695 /* Set hardware config mode */
1696 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1697 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
Stephen Hemminger5e1705d2005-08-16 14:00:58 -07001698 reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001699
1700 /* Clear GMC reset */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001701 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1702 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1703 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001704 if (skge->autoneg == AUTONEG_DISABLE) {
1705 reg = GM_GPCR_AU_ALL_DIS;
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001706 gma_write16(hw, port, GM_GP_CTRL,
1707 gma_read16(hw, port, GM_GP_CTRL) | reg);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001708
1709 switch (skge->speed) {
1710 case SPEED_1000:
1711 reg |= GM_GPCR_SPEED_1000;
1712 /* fallthru */
1713 case SPEED_100:
1714 reg |= GM_GPCR_SPEED_100;
1715 }
1716
1717 if (skge->duplex == DUPLEX_FULL)
1718 reg |= GM_GPCR_DUP_FULL;
1719 } else
1720 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1721 switch (skge->flow_control) {
1722 case FLOW_MODE_NONE:
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001723 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001724 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1725 break;
1726 case FLOW_MODE_LOC_SEND:
1727 /* disable Rx flow-control */
1728 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1729 }
1730
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001731 gma_write16(hw, port, GM_GP_CTRL, reg);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001732 skge_read16(hw, GMAC_IRQ_SRC);
1733
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001734 yukon_init(hw, port);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001735
1736 /* MIB clear */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001737 reg = gma_read16(hw, port, GM_PHY_ADDR);
1738 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001739
1740 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001741 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1742 gma_write16(hw, port, GM_PHY_ADDR, reg);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001743
1744 /* transmit control */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001745 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001746
1747 /* receive control reg: unicast + multicast + no FCS */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001748 gma_write16(hw, port, GM_RX_CTRL,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001749 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1750
1751 /* transmit flow control */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001752 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001753
1754 /* transmit parameter */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001755 gma_write16(hw, port, GM_TX_PARAM,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001756 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1757 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1758 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
1759
1760 /* serial mode register */
1761 reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
1762 if (hw->dev[port]->mtu > 1500)
1763 reg |= GM_SMOD_JUMBO_ENA;
1764
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001765 gma_write16(hw, port, GM_SERIAL_MODE, reg);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001766
1767 /* physical address: used for pause frames */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001768 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001769 /* virtual address for data */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001770 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001771
1772 /* enable interrupt mask for counter overflows */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001773 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1774 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1775 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001776
1777 /* Initialize Mac Fifo */
1778
1779 /* Configure Rx MAC FIFO */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001780 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001781 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1782 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
Stephen Hemminger38231712005-07-22 16:26:06 -07001783 hw->chip_rev >= CHIP_REV_YU_LITE_A3)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001784 reg &= ~GMF_RX_F_FL_ON;
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001785 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1786 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
Stephen Hemmingerc5923082005-08-16 14:01:02 -07001787 /*
1788 * Because Pause Packet Truncation in the GMAC is not working,
1789 * we have to increase the Flush Threshold to 64 bytes
1790 * in order to flush pause packets from the Rx FIFO on Yukon-1.
1791 */
1792 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001793
1794 /* Configure Tx MAC FIFO */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001795 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1796 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001797}
1798
1799static void yukon_stop(struct skge_port *skge)
1800{
1801 struct skge_hw *hw = skge->hw;
1802 int port = skge->port;
1803
1804 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
Stephen Hemminger38231712005-07-22 16:26:06 -07001805 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001806 skge_write32(hw, B2_GP_IO,
1807 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1808 }
1809
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001810 gma_write16(hw, port, GM_GP_CTRL,
1811 gma_read16(hw, port, GM_GP_CTRL)
Stephen Hemminger0eedf4a2005-07-22 16:26:04 -07001812 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001813 gma_read16(hw, port, GM_GP_CTRL);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001814
1815 /* set GPHY Control reset */
Stephen Hemmingerd8a09942005-07-22 16:26:08 -07001816 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1817 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001818}
1819
1820static void yukon_get_stats(struct skge_port *skge, u64 *data)
1821{
1822 struct skge_hw *hw = skge->hw;
1823 int port = skge->port;
1824 int i;
1825
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001826 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
1827 | gma_read32(hw, port, GM_TXO_OK_LO);
1828 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
1829 | gma_read32(hw, port, GM_RXO_OK_LO);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001830
1831 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001832 data[i] = gma_read32(hw, port,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001833 skge_stats[i].gma_offset);
1834}
1835
1836static void yukon_mac_intr(struct skge_hw *hw, int port)
1837{
Stephen Hemminger7e676d92005-06-27 11:33:13 -07001838 struct net_device *dev = hw->dev[port];
1839 struct skge_port *skge = netdev_priv(dev);
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001840 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001841
Stephen Hemminger7e676d92005-06-27 11:33:13 -07001842 if (netif_msg_intr(skge))
1843 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1844 dev->name, status);
1845
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001846 if (status & GM_IS_RX_FF_OR) {
1847 ++skge->net_stats.rx_fifo_errors;
Stephen Hemmingerd8a09942005-07-22 16:26:08 -07001848 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001849 }
Stephen Hemmingerd8a09942005-07-22 16:26:08 -07001850
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001851 if (status & GM_IS_TX_FF_UR) {
1852 ++skge->net_stats.tx_fifo_errors;
Stephen Hemmingerd8a09942005-07-22 16:26:08 -07001853 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001854 }
1855
1856}
1857
1858static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1859{
Stephen Hemminger95566062005-06-27 11:33:02 -07001860 switch (aux & PHY_M_PS_SPEED_MSK) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001861 case PHY_M_PS_SPEED_1000:
1862 return SPEED_1000;
1863 case PHY_M_PS_SPEED_100:
1864 return SPEED_100;
1865 default:
1866 return SPEED_10;
1867 }
1868}
1869
1870static void yukon_link_up(struct skge_port *skge)
1871{
1872 struct skge_hw *hw = skge->hw;
1873 int port = skge->port;
1874 u16 reg;
1875
1876 pr_debug("yukon_link_up\n");
1877
1878 /* Enable Transmit FIFO Underrun */
1879 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
1880
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001881 reg = gma_read16(hw, port, GM_GP_CTRL);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001882 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1883 reg |= GM_GPCR_DUP_FULL;
1884
1885 /* enable Rx/Tx */
1886 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001887 gma_write16(hw, port, GM_GP_CTRL, reg);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001888
Stephen Hemminger4cde06e2005-07-22 16:26:09 -07001889 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001890 skge_link_up(skge);
1891}
1892
1893static void yukon_link_down(struct skge_port *skge)
1894{
1895 struct skge_hw *hw = skge->hw;
1896 int port = skge->port;
Stephen Hemmingerd8a09942005-07-22 16:26:08 -07001897 u16 ctrl;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001898
1899 pr_debug("yukon_link_down\n");
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001900 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
Stephen Hemmingerd8a09942005-07-22 16:26:08 -07001901
1902 ctrl = gma_read16(hw, port, GM_GP_CTRL);
1903 ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1904 gma_write16(hw, port, GM_GP_CTRL, ctrl);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001905
Stephen Hemmingerc506a502005-06-27 11:33:09 -07001906 if (skge->flow_control == FLOW_MODE_REM_SEND) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001907 /* restore Asymmetric Pause bit */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001908 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
1909 gm_phy_read(hw, port,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001910 PHY_MARV_AUNE_ADV)
1911 | PHY_M_AN_ASP);
1912
1913 }
1914
1915 yukon_reset(hw, port);
1916 skge_link_down(skge);
1917
1918 yukon_init(hw, port);
1919}
1920
1921static void yukon_phy_intr(struct skge_port *skge)
1922{
1923 struct skge_hw *hw = skge->hw;
1924 int port = skge->port;
1925 const char *reason = NULL;
1926 u16 istatus, phystat;
1927
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001928 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
1929 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
Stephen Hemminger7e676d92005-06-27 11:33:13 -07001930
1931 if (netif_msg_intr(skge))
1932 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
1933 skge->netdev->name, istatus, phystat);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001934
1935 if (istatus & PHY_M_IS_AN_COMPL) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001936 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001937 & PHY_M_AN_RF) {
1938 reason = "remote fault";
1939 goto failed;
1940 }
1941
Stephen Hemmingerc506a502005-06-27 11:33:09 -07001942 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001943 reason = "master/slave fault";
1944 goto failed;
1945 }
1946
1947 if (!(phystat & PHY_M_PS_SPDUP_RES)) {
1948 reason = "speed/duplex";
1949 goto failed;
1950 }
1951
1952 skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
1953 ? DUPLEX_FULL : DUPLEX_HALF;
1954 skge->speed = yukon_speed(hw, phystat);
1955
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001956 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1957 switch (phystat & PHY_M_PS_PAUSE_MSK) {
1958 case PHY_M_PS_PAUSE_MSK:
1959 skge->flow_control = FLOW_MODE_SYMMETRIC;
1960 break;
1961 case PHY_M_PS_RX_P_EN:
1962 skge->flow_control = FLOW_MODE_REM_SEND;
1963 break;
1964 case PHY_M_PS_TX_P_EN:
1965 skge->flow_control = FLOW_MODE_LOC_SEND;
1966 break;
1967 default:
1968 skge->flow_control = FLOW_MODE_NONE;
1969 }
1970
1971 if (skge->flow_control == FLOW_MODE_NONE ||
1972 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001973 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001974 else
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07001975 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04001976 yukon_link_up(skge);
1977 return;
1978 }
1979
1980 if (istatus & PHY_M_IS_LSP_CHANGE)
1981 skge->speed = yukon_speed(hw, phystat);
1982
1983 if (istatus & PHY_M_IS_DUP_CHANGE)
1984 skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1985 if (istatus & PHY_M_IS_LST_CHANGE) {
1986 if (phystat & PHY_M_PS_LINK_UP)
1987 yukon_link_up(skge);
1988 else
1989 yukon_link_down(skge);
1990 }
1991 return;
1992 failed:
1993 printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
1994 skge->netdev->name, reason);
1995
1996 /* XXX restart autonegotiation? */
1997}
1998
1999static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
2000{
2001 u32 end;
2002
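	/* The RAM buffer registers are programmed in 8-byte (qword) units, so convert the byte start and length */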
2003 start /= 8;
2004 len /= 8;
2005 end = start + len - 1;
2006
2007 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2008 skge_write32(hw, RB_ADDR(q, RB_START), start);
2009 skge_write32(hw, RB_ADDR(q, RB_WP), start);
2010 skge_write32(hw, RB_ADDR(q, RB_RP), start);
2011 skge_write32(hw, RB_ADDR(q, RB_END), end);
2012
2013 if (q == Q_R1 || q == Q_R2) {
2014 /* Set thresholds on receive queues */
2015 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
2016 start + (2*len)/3);
2017 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
2018 start + (len/3));
2019 } else {
2020 /* Enable store & forward on Tx queues because the
2021 * Tx FIFO is only 4K on Genesis and 1K on Yukon
2022 */
2023 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2024 }
2025
2026 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
2027}
2028
2029/* Setup Bus Memory Interface */
2030static void skge_qset(struct skge_port *skge, u16 q,
2031 const struct skge_element *e)
2032{
2033 struct skge_hw *hw = skge->hw;
2034 u32 watermark = 0x600;
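	/* Descriptor base handed to the BMU: the ring's bus address plus this element's offset into the ring memory */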
2035 u64 base = skge->dma + (e->desc - skge->mem);
2036
2037 /* optimization to reduce window on 32-bit/33 MHz PCI */
2038 if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
2039 watermark /= 2;
2040
2041 skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
2042 skge_write32(hw, Q_ADDR(q, Q_F), watermark);
2043 skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
2044 skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
2045}
2046
2047static int skge_up(struct net_device *dev)
2048{
2049 struct skge_port *skge = netdev_priv(dev);
2050 struct skge_hw *hw = skge->hw;
2051 int port = skge->port;
2052 u32 chunk, ram_addr;
2053 size_t rx_size, tx_size;
2054 int err;
2055
2056 if (netif_msg_ifup(skge))
2057 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2058
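	/* Size receive buffers for a full frame: the 1536-byte default for a standard MTU,
	 * otherwise MTU plus Ethernet header and IP alignment slack for jumbo frames.
	 */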
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002059 if (dev->mtu > RX_BUF_SIZE)
2060 skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
2061 else
2062 skge->rx_buf_size = RX_BUF_SIZE;
2063
2064
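	/* Both rings share one coherent DMA allocation: receive descriptors at the start, transmit descriptors at offset rx_size */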
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002065 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2066 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2067 skge->mem_size = tx_size + rx_size;
2068 skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
2069 if (!skge->mem)
2070 return -ENOMEM;
2071
2072 memset(skge->mem, 0, skge->mem_size);
2073
2074 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
2075 goto free_pci_mem;
2076
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002077 err = skge_rx_fill(skge);
2078 if (err)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002079 goto free_rx_ring;
2080
2081 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
2082 skge->dma + rx_size)))
2083 goto free_rx_ring;
2084
2085 skge->tx_avail = skge->tx_ring.count - 1;
2086
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002087 /* Enable IRQ from port */
2088 hw->intr_mask |= portirqmask[port];
2089 skge_write32(hw, B0_IMSK, hw->intr_mask);
2090
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002091 /* Initialize MAC */
Stephen Hemminger4ff6ac02005-07-22 16:26:05 -07002092 spin_lock_bh(&hw->phy_lock);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002093 if (hw->chip_id == CHIP_ID_GENESIS)
2094 genesis_mac_init(hw, port);
2095 else
2096 yukon_mac_init(hw, port);
Stephen Hemminger4ff6ac02005-07-22 16:26:05 -07002097 spin_unlock_bh(&hw->phy_lock);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002098
2099 /* Configure RAMbuffers */
Stephen Hemminger981d0372005-06-27 11:33:06 -07002100 chunk = hw->ram_size / ((hw->ports + 1)*2);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002101 ram_addr = hw->ram_offset + 2 * chunk * port;
2102
2103 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
2104 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2105
2106 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2107 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2108 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2109
2110 /* Start receiver BMU */
2111 wmb();
2112 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
Stephen Hemminger6abebb52005-07-22 16:26:10 -07002113 skge_led(skge, LED_MODE_ON);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002114
2115 pr_debug("skge_up completed\n");
2116 return 0;
2117
2118 free_rx_ring:
2119 skge_rx_clean(skge);
2120 kfree(skge->rx_ring.start);
2121 free_pci_mem:
2122 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2123
2124 return err;
2125}
2126
2127static int skge_down(struct net_device *dev)
2128{
2129 struct skge_port *skge = netdev_priv(dev);
2130 struct skge_hw *hw = skge->hw;
2131 int port = skge->port;
2132
2133 if (netif_msg_ifdown(skge))
2134 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2135
2136 netif_stop_queue(dev);
2137
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002138 /* Stop transmitter */
2139 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2140 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2141 RB_RST_SET|RB_DIS_OP_MD);
2142
2143 if (hw->chip_id == CHIP_ID_GENESIS)
2144 genesis_stop(skge);
2145 else
2146 yukon_stop(skge);
2147
2148 /* Disable Force Sync bit and Enable Alloc bit */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002149 skge_write8(hw, SK_REG(port, TXA_CTRL),
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002150 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2151
2152 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002153 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2154 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002155
2156 /* Reset PCI FIFO */
2157 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
2158 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2159
2160 /* Reset the RAM Buffer async Tx queue */
2161 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2162 /* stop receiver */
2163 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2164 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2165 RB_RST_SET|RB_DIS_OP_MD);
2166 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2167
2168 if (hw->chip_id == CHIP_ID_GENESIS) {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002169 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2170 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002171 } else {
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002172 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2173 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002174 }
2175
Stephen Hemminger6abebb52005-07-22 16:26:10 -07002176 skge_led(skge, LED_MODE_OFF);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002177
2178 skge_tx_clean(skge);
2179 skge_rx_clean(skge);
2180
2181 kfree(skge->rx_ring.start);
2182 kfree(skge->tx_ring.start);
2183 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
2184 return 0;
2185}
2186
2187static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2188{
2189 struct skge_port *skge = netdev_priv(dev);
2190 struct skge_hw *hw = skge->hw;
2191 struct skge_ring *ring = &skge->tx_ring;
2192 struct skge_element *e;
2193 struct skge_tx_desc *td;
2194 int i;
2195 u32 control, len;
2196 u64 map;
2197 unsigned long flags;
2198
2199 skb = skb_padto(skb, ETH_ZLEN);
2200 if (!skb)
2201 return NETDEV_TX_OK;
2202
2203 local_irq_save(flags);
2204 if (!spin_trylock(&skge->tx_lock)) {
Stephen Hemminger95566062005-06-27 11:33:02 -07002205 /* Collision - tell upper layer to requeue */
2206 local_irq_restore(flags);
2207 return NETDEV_TX_LOCKED;
2208 }
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002209
2210 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
2211 netif_stop_queue(dev);
2212 spin_unlock_irqrestore(&skge->tx_lock, flags);
2213
2214 printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
2215 dev->name);
2216 return NETDEV_TX_BUSY;
2217 }
2218
2219 e = ring->to_use;
2220 td = e->desc;
2221 e->skb = skb;
2222 len = skb_headlen(skb);
2223 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2224 pci_unmap_addr_set(e, mapaddr, map);
2225 pci_unmap_len_set(e, maplen, len);
2226
2227 td->dma_lo = map;
2228 td->dma_hi = map >> 32;
2229
2230 if (skb->ip_summed == CHECKSUM_HW) {
2231 const struct iphdr *ip
2232 = (const struct iphdr *) (skb->data + ETH_HLEN);
2233 int offset = skb->h.raw - skb->data;
2234
2235 /* This seems backwards, but it is what the sk98lin
2236 * does. Looks like hardware is wrong?
2237 */
2238 if (ip->protocol == IPPROTO_UDP
Stephen Hemminger981d0372005-06-27 11:33:06 -07002239 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002240 control = BMU_TCP_CHECK;
2241 else
2242 control = BMU_UDP_CHECK;
2243
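		/* The hardware checksums from csum_start onward and stores the result at csum_write
		 * (transport header start plus the checksum field offset carried in skb->csum).
		 */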
2244 td->csum_offs = 0;
2245 td->csum_start = offset;
2246 td->csum_write = offset + skb->csum;
2247 } else
2248 control = BMU_CHECK;
2249
2250 if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
2251 control |= BMU_EOF| BMU_IRQ_EOF;
2252 else {
2253 struct skge_tx_desc *tf = td;
2254
2255 control |= BMU_STFWD;
2256 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2257 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2258
2259 map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
2260 frag->size, PCI_DMA_TODEVICE);
2261
2262 e = e->next;
2263 e->skb = NULL;
2264 tf = e->desc;
2265 tf->dma_lo = map;
2266 tf->dma_hi = (u64) map >> 32;
2267 pci_unmap_addr_set(e, mapaddr, map);
2268 pci_unmap_len_set(e, maplen, frag->size);
2269
2270 tf->control = BMU_OWN | BMU_SW | control | frag->size;
2271 }
2272 tf->control |= BMU_EOF | BMU_IRQ_EOF;
2273 }
2274 /* Make sure all the descriptors are written */
2275 wmb();
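	/* Give ownership of the first descriptor to the hardware last, so it never sees a partially built chain */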
2276 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
2277 wmb();
2278
2279 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2280
2281 if (netif_msg_tx_queued(skge))
Al Viro0b2d7fe2005-04-03 09:15:52 +01002282 printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002283 dev->name, e - ring->start, skb->len);
2284
2285 ring->to_use = e->next;
2286 skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
2287 if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
2288 pr_debug("%s: transmit queue full\n", dev->name);
2289 netif_stop_queue(dev);
2290 }
2291
2292 dev->trans_start = jiffies;
2293 spin_unlock_irqrestore(&skge->tx_lock, flags);
2294
2295 return NETDEV_TX_OK;
2296}
2297
2298static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
2299{
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002300 /* This ring element can be an skb or a fragment */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002301 if (e->skb) {
2302 pci_unmap_single(hw->pdev,
2303 pci_unmap_addr(e, mapaddr),
2304 pci_unmap_len(e, maplen),
2305 PCI_DMA_TODEVICE);
2306 dev_kfree_skb_any(e->skb);
2307 e->skb = NULL;
2308 } else {
2309 pci_unmap_page(hw->pdev,
2310 pci_unmap_addr(e, mapaddr),
2311 pci_unmap_len(e, maplen),
2312 PCI_DMA_TODEVICE);
2313 }
2314}
2315
2316static void skge_tx_clean(struct skge_port *skge)
2317{
2318 struct skge_ring *ring = &skge->tx_ring;
2319 struct skge_element *e;
2320 unsigned long flags;
2321
2322 spin_lock_irqsave(&skge->tx_lock, flags);
2323 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2324 ++skge->tx_avail;
2325 skge_tx_free(skge->hw, e);
2326 }
2327 ring->to_clean = e;
2328 spin_unlock_irqrestore(&skge->tx_lock, flags);
2329}
2330
2331static void skge_tx_timeout(struct net_device *dev)
2332{
2333 struct skge_port *skge = netdev_priv(dev);
2334
2335 if (netif_msg_timer(skge))
2336 printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
2337
2338 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2339 skge_tx_clean(skge);
2340}
2341
2342static int skge_change_mtu(struct net_device *dev, int new_mtu)
2343{
2344 int err = 0;
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002345 int running = netif_running(dev);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002346
Stephen Hemminger95566062005-06-27 11:33:02 -07002347 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002348 return -EINVAL;
2349
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002350
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002351 if (running)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002352 skge_down(dev);
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002353 dev->mtu = new_mtu;
2354 if (running)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002355 skge_up(dev);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002356
2357 return err;
2358}
2359
2360static void genesis_set_multicast(struct net_device *dev)
2361{
2362 struct skge_port *skge = netdev_priv(dev);
2363 struct skge_hw *hw = skge->hw;
2364 int port = skge->port;
2365 int i, count = dev->mc_count;
2366 struct dev_mc_list *list = dev->mc_list;
2367 u32 mode;
2368 u8 filter[8];
2369
Stephen Hemminger45bada62005-06-27 11:33:12 -07002370 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2371
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002372 mode = xm_read32(hw, port, XM_MODE);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002373 mode |= XM_MD_ENA_HASH;
2374 if (dev->flags & IFF_PROMISC)
2375 mode |= XM_MD_ENA_PROM;
2376 else
2377 mode &= ~XM_MD_ENA_PROM;
2378
2379 if (dev->flags & IFF_ALLMULTI)
2380 memset(filter, 0xff, sizeof(filter));
2381 else {
2382 memset(filter, 0, sizeof(filter));
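		/* Build the 64-bit hash filter: each address sets the bit selected by the low six bits of the inverted little-endian CRC */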
Stephen Hemminger95566062005-06-27 11:33:02 -07002383 for (i = 0; list && i < count; i++, list = list->next) {
Stephen Hemminger45bada62005-06-27 11:33:12 -07002384 u32 crc, bit;
2385 crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
2386 bit = ~crc & 0x3f;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002387 filter[bit/8] |= 1 << (bit%8);
2388 }
2389 }
2390
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002391 xm_write32(hw, port, XM_MODE, mode);
Stephen Hemminger45bada62005-06-27 11:33:12 -07002392 xm_outhash(hw, port, XM_HSM, filter);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002393}
2394
2395static void yukon_set_multicast(struct net_device *dev)
2396{
2397 struct skge_port *skge = netdev_priv(dev);
2398 struct skge_hw *hw = skge->hw;
2399 int port = skge->port;
2400 struct dev_mc_list *list = dev->mc_list;
2401 u16 reg;
2402 u8 filter[8];
2403
2404 memset(filter, 0, sizeof(filter));
2405
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002406 reg = gma_read16(hw, port, GM_RX_CTRL);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002407 reg |= GM_RXCR_UCF_ENA;
2408
2409 if (dev->flags & IFF_PROMISC) /* promiscuous */
2410 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2411 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
2412 memset(filter, 0xff, sizeof(filter));
2413 else if (dev->mc_count == 0) /* no multicast */
2414 reg &= ~GM_RXCR_MCF_ENA;
2415 else {
2416 int i;
2417 reg |= GM_RXCR_MCF_ENA;
2418
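		/* Each multicast address sets the filter bit selected by the low six bits of its CRC */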
Stephen Hemminger95566062005-06-27 11:33:02 -07002419 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002420 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2421 filter[bit/8] |= 1 << (bit%8);
2422 }
2423 }
2424
2425
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002426 gma_write16(hw, port, GM_MC_ADDR_H1,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002427 (u16)filter[0] | ((u16)filter[1] << 8));
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002428 gma_write16(hw, port, GM_MC_ADDR_H2,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002429 (u16)filter[2] | ((u16)filter[3] << 8));
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002430 gma_write16(hw, port, GM_MC_ADDR_H3,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002431 (u16)filter[4] | ((u16)filter[5] << 8));
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002432 gma_write16(hw, port, GM_MC_ADDR_H4,
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002433 (u16)filter[6] | ((u16)filter[7] << 8));
2434
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002435 gma_write16(hw, port, GM_RX_CTRL, reg);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002436}
2437
2438static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2439{
2440 if (hw->chip_id == CHIP_ID_GENESIS)
2441 return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
2442 else
2443 return (status & GMR_FS_ANY_ERR) ||
2444 (status & GMR_FS_RX_OK) == 0;
2445}
2446
2447static void skge_rx_error(struct skge_port *skge, int slot,
2448 u32 control, u32 status)
2449{
2450 if (netif_msg_rx_err(skge))
2451 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2452 skge->netdev->name, slot, control, status);
2453
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002454 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002455 skge->net_stats.rx_length_errors++;
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002456 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2457 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2458 skge->net_stats.rx_length_errors++;
2459 if (status & XMR_FS_FRA_ERR)
2460 skge->net_stats.rx_frame_errors++;
2461 if (status & XMR_FS_FCS_ERR)
2462 skge->net_stats.rx_crc_errors++;
2463 } else {
2464 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2465 skge->net_stats.rx_length_errors++;
2466 if (status & GMR_FS_FRAGMENT)
2467 skge->net_stats.rx_frame_errors++;
2468 if (status & GMR_FS_CRC_ERR)
2469 skge->net_stats.rx_crc_errors++;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002470 }
2471}
2472
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002473/* Get receive buffer from descriptor.
2474 * Handles copy of small buffers and reallocation failures
2475 */
2476static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2477 struct skge_element *e,
2478 unsigned int len)
2479{
2480 struct sk_buff *nskb, *skb;
2481
2482 if (len < RX_COPY_THRESHOLD) {
2483 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
2484 if (unlikely(!nskb))
2485 return NULL;
2486
2487 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2488 pci_unmap_addr(e, mapaddr),
2489 len, PCI_DMA_FROMDEVICE);
2490 memcpy(nskb->data, e->skb->data, len);
2491 pci_dma_sync_single_for_device(skge->hw->pdev,
2492 pci_unmap_addr(e, mapaddr),
2493 len, PCI_DMA_FROMDEVICE);
2494
2495 if (skge->rx_csum) {
2496 struct skge_rx_desc *rd = e->desc;
2497 nskb->csum = le16_to_cpu(rd->csum2);
2498 nskb->ip_summed = CHECKSUM_HW;
2499 }
2500 skge_rx_reuse(e, skge->rx_buf_size);
2501 return nskb;
2502 } else {
2503 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
2504 if (unlikely(!nskb))
2505 return NULL;
2506
2507 pci_unmap_single(skge->hw->pdev,
2508 pci_unmap_addr(e, mapaddr),
2509 pci_unmap_len(e, maplen),
2510 PCI_DMA_FROMDEVICE);
2511 skb = e->skb;
2512 if (skge->rx_csum) {
2513 struct skge_rx_desc *rd = e->desc;
2514 skb->csum = le16_to_cpu(rd->csum2);
2515 skb->ip_summed = CHECKSUM_HW;
2516 }
2517
2518 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2519 return skb;
2520 }
2521}
2522
2523
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002524static int skge_poll(struct net_device *dev, int *budget)
2525{
2526 struct skge_port *skge = netdev_priv(dev);
2527 struct skge_hw *hw = skge->hw;
2528 struct skge_ring *ring = &skge->rx_ring;
2529 struct skge_element *e;
2530 unsigned int to_do = min(dev->quota, *budget);
2531 unsigned int work_done = 0;
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002532
2533 pr_debug("skge_poll\n");
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002534
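	/* Walk the receive ring until we hit a descriptor the hardware still owns, or the NAPI budget runs out */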
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002535 for (e = ring->to_clean; work_done < to_do; e = e->next) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002536 struct skge_rx_desc *rd = e->desc;
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002537 struct sk_buff *skb;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002538 u32 control, len, status;
2539
2540 rmb();
2541 control = rd->control;
2542 if (control & BMU_OWN)
2543 break;
2544
2545 len = control & BMU_BBC;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002546 status = rd->status;
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002547
2548 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2549 || bad_phy_status(hw, status))) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002550 skge_rx_error(skge, e - ring->start, control, status);
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002551 skge_rx_reuse(e, skge->rx_buf_size);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002552 continue;
2553 }
2554
2555 if (netif_msg_rx_status(skge))
Al Viro0b2d7fe2005-04-03 09:15:52 +01002556 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002557 dev->name, e - ring->start, rd->status, len);
2558
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002559 skb = skge_rx_get(skge, e, len);
2560 if (likely(skb)) {
2561 skb_put(skb, len);
2562 skb->protocol = eth_type_trans(skb, dev);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002563
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002564 dev->last_rx = jiffies;
2565 netif_receive_skb(skb);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002566
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002567 ++work_done;
2568 } else
2569 skge_rx_reuse(e, skge->rx_buf_size);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002570 }
2571 ring->to_clean = e;
2572
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002573 /* restart receiver */
2574 wmb();
2575 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
2576 CSR_START | CSR_IRQ_CL_F);
2577
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002578 *budget -= work_done;
2579 dev->quota -= work_done;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002580
Stephen Hemminger19a33d42005-06-27 11:33:15 -07002581 if (work_done >= to_do)
2582 return 1; /* not done */
2583
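	/* Ring fully drained: leave polling mode and re-enable this port's receive interrupt */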
2584 local_irq_disable();
2585 __netif_rx_complete(dev);
2586 hw->intr_mask |= portirqmask[skge->port];
2587 skge_write32(hw, B0_IMSK, hw->intr_mask);
2588 local_irq_enable();
2589 return 0;
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002590}
2591
2592static inline void skge_tx_intr(struct net_device *dev)
2593{
2594 struct skge_port *skge = netdev_priv(dev);
2595 struct skge_hw *hw = skge->hw;
2596 struct skge_ring *ring = &skge->tx_ring;
2597 struct skge_element *e;
2598
2599 spin_lock(&skge->tx_lock);
Stephen Hemminger95566062005-06-27 11:33:02 -07002600 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002601 struct skge_tx_desc *td = e->desc;
2602 u32 control;
2603
2604 rmb();
2605 control = td->control;
2606 if (control & BMU_OWN)
2607 break;
2608
2609 if (unlikely(netif_msg_tx_done(skge)))
Al Viro0b2d7fe2005-04-03 09:15:52 +01002610 printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002611 dev->name, e - ring->start, td->status);
2612
2613 skge_tx_free(hw, e);
2614 e->skb = NULL;
2615 ++skge->tx_avail;
2616 }
2617 ring->to_clean = e;
2618 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
2619
2620 if (skge->tx_avail > MAX_SKB_FRAGS + 1)
2621 netif_wake_queue(dev);
2622
2623 spin_unlock(&skge->tx_lock);
2624}
2625
Stephen Hemmingerf6620ca2005-07-22 16:26:02 -07002626/* Parity errors seem to happen when Genesis is connected to a switch
2627 * with no other ports present. Heartbeat error??
2628 */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002629static void skge_mac_parity(struct skge_hw *hw, int port)
2630{
Stephen Hemmingerf6620ca2005-07-22 16:26:02 -07002631 struct net_device *dev = hw->dev[port];
2632
2633 if (dev) {
2634 struct skge_port *skge = netdev_priv(dev);
2635 ++skge->net_stats.tx_heartbeat_errors;
2636 }
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002637
2638 if (hw->chip_id == CHIP_ID_GENESIS)
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002639 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002640 MFF_CLR_PERR);
2641 else
2642 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002643 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
Stephen Hemminger981d0372005-06-27 11:33:06 -07002644 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002645 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2646}
2647
2648static void skge_pci_clear(struct skge_hw *hw)
2649{
2650 u16 status;
2651
Stephen Hemminger467b3412005-06-27 11:33:05 -07002652 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002653 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
Stephen Hemminger467b3412005-06-27 11:33:05 -07002654 pci_write_config_word(hw->pdev, PCI_STATUS,
2655 status | PCI_STATUS_ERROR_BITS);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002656 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2657}
2658
2659static void skge_mac_intr(struct skge_hw *hw, int port)
2660{
Stephen Hemminger95566062005-06-27 11:33:02 -07002661 if (hw->chip_id == CHIP_ID_GENESIS)
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002662 genesis_mac_intr(hw, port);
2663 else
2664 yukon_mac_intr(hw, port);
2665}
2666
2667/* Handle device specific framing and timeout interrupts */
2668static void skge_error_irq(struct skge_hw *hw)
2669{
2670 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2671
2672 if (hw->chip_id == CHIP_ID_GENESIS) {
2673 /* clear xmac errors */
2674 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002675 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002676 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
Stephen Hemminger6b0c1482005-06-27 11:33:04 -07002677 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002678 } else {
2679 /* Timestamp (unused) overflow */
2680 if (hwstatus & IS_IRQ_TIST_OV)
2681 skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002682 }
2683
2684 if (hwstatus & IS_RAM_RD_PAR) {
2685 printk(KERN_ERR PFX "Ram read data parity error\n");
2686 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
2687 }
2688
2689 if (hwstatus & IS_RAM_WR_PAR) {
2690 printk(KERN_ERR PFX "Ram write data parity error\n");
2691 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
2692 }
2693
2694 if (hwstatus & IS_M1_PAR_ERR)
2695 skge_mac_parity(hw, 0);
2696
2697 if (hwstatus & IS_M2_PAR_ERR)
2698 skge_mac_parity(hw, 1);
2699
2700 if (hwstatus & IS_R1_PAR_ERR)
2701 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
2702
2703 if (hwstatus & IS_R2_PAR_ERR)
2704 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
2705
2706 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
2707 printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
2708 hwstatus);
2709
2710 skge_pci_clear(hw);
2711
Stephen Hemminger050ec182005-08-16 14:00:54 -07002712 /* if the error is still set, just ignore it */
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002713 hwstatus = skge_read32(hw, B0_HWE_ISRC);
2714 if (hwstatus & IS_IRQ_STAT) {
Stephen Hemminger050ec182005-08-16 14:00:54 -07002715 pr_debug("IRQ status %x: still set, ignoring hardware errors\n",
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002716 hwstatus);
2717 hw->intr_mask &= ~IS_HW_ERR;
2718 }
2719 }
2720}
2721
2722/*
2723 * Interrupts from the PHY are handled in a tasklet (soft irq)
2724 * because accessing phy registers requires a spin wait, which might
2725 * cause excess interrupt latency.
2726 */
2727static void skge_extirq(unsigned long data)
2728{
2729 struct skge_hw *hw = (struct skge_hw *) data;
2730 int port;
2731
2732 spin_lock(&hw->phy_lock);
2733 for (port = 0; port < 2; port++) {
2734 struct net_device *dev = hw->dev[port];
2735
2736 if (dev && netif_running(dev)) {
2737 struct skge_port *skge = netdev_priv(dev);
2738
2739 if (hw->chip_id != CHIP_ID_GENESIS)
2740 yukon_phy_intr(skge);
Stephen Hemminger89bf5f22005-06-27 11:33:10 -07002741 else
Stephen Hemminger45bada62005-06-27 11:33:12 -07002742 bcom_phy_intr(skge);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002743 }
2744 }
2745 spin_unlock(&hw->phy_lock);
2746
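	/* skge_intr() masked IS_EXT_REG before scheduling this tasklet; re-enable it now that the PHY has been serviced */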
2747 local_irq_disable();
2748 hw->intr_mask |= IS_EXT_REG;
2749 skge_write32(hw, B0_IMSK, hw->intr_mask);
2750 local_irq_enable();
2751}
2752
2753static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2754{
2755 struct skge_hw *hw = dev_id;
2756 u32 status = skge_read32(hw, B0_SP_ISRC);
2757
2758 if (status == 0 || status == ~0) /* hotplug or shared irq */
2759 return IRQ_NONE;
2760
2761 status &= hw->intr_mask;
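	/* Receive complete: mask the port's Rx interrupt and hand the port to NAPI; skge_poll() re-enables it when the ring is drained */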
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002762 if (status & IS_R1_F) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002763 hw->intr_mask &= ~IS_R1_F;
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002764 netif_rx_schedule(hw->dev[0]);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002765 }
2766
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002767 if (status & IS_R2_F) {
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002768 hw->intr_mask &= ~IS_R2_F;
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002769 netif_rx_schedule(hw->dev[1]);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002770 }
2771
2772 if (status & IS_XA1_F)
2773 skge_tx_intr(hw->dev[0]);
2774
2775 if (status & IS_XA2_F)
2776 skge_tx_intr(hw->dev[1]);
2777
Stephen Hemmingerd25f5a62005-06-27 11:33:14 -07002778 if (status & IS_PA_TO_RX1) {
2779 struct skge_port *skge = netdev_priv(hw->dev[0]);
2780 ++skge->net_stats.rx_over_errors;
2781 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
2782 }
2783
2784 if (status & IS_PA_TO_RX2) {
2785 struct skge_port *skge = netdev_priv(hw->dev[1]);
2786 ++skge->net_stats.rx_over_errors;
2787 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
2788 }
2789
2790 if (status & IS_PA_TO_TX1)
2791 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
2792
2793 if (status & IS_PA_TO_TX2)
2794 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
2795
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002796 if (status & IS_MAC1)
2797 skge_mac_intr(hw, 0);
Stephen Hemminger95566062005-06-27 11:33:02 -07002798
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002799 if (status & IS_MAC2)
2800 skge_mac_intr(hw, 1);
2801
2802 if (status & IS_HW_ERR)
2803 skge_error_irq(hw);
2804
2805 if (status & IS_EXT_REG) {
2806 hw->intr_mask &= ~IS_EXT_REG;
2807 tasklet_schedule(&hw->ext_tasklet);
2808 }
2809
Stephen Hemminger7e676d92005-06-27 11:33:13 -07002810 skge_write32(hw, B0_IMSK, hw->intr_mask);
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002811
2812 return IRQ_HANDLED;
2813}
2814
2815#ifdef CONFIG_NET_POLL_CONTROLLER
2816static void skge_netpoll(struct net_device *dev)
2817{
2818 struct skge_port *skge = netdev_priv(dev);
2819
2820 disable_irq(dev->irq);
2821 skge_intr(dev->irq, skge->hw, NULL);
2822 enable_irq(dev->irq);
2823}
2824#endif
2825
2826static int skge_set_mac_address(struct net_device *dev, void *p)
2827{
2828 struct skge_port *skge = netdev_priv(dev);
2829 struct sockaddr *addr = p;
2830 int err = 0;
2831
2832 if (!is_valid_ether_addr(addr->sa_data))
2833 return -EADDRNOTAVAIL;
2834
2835 skge_down(dev);
2836 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2837 memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
2838 dev->dev_addr, ETH_ALEN);
2839 memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
2840 dev->dev_addr, ETH_ALEN);
2841 if (dev->flags & IFF_UP)
2842 err = skge_up(dev);
2843 return err;
2844}
2845
2846static const struct {
2847 u8 id;
2848 const char *name;
2849} skge_chips[] = {
2850 { CHIP_ID_GENESIS, "Genesis" },
2851 { CHIP_ID_YUKON, "Yukon" },
2852 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2853 { CHIP_ID_YUKON_LP, "Yukon-LP"},
Stephen Hemmingerbaef58b2005-05-12 20:14:36 -04002854};
2855
static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}

/*
 * Set up the board data structure, but don't bring up
 * the port(s)
 */
static int skge_reset(struct skge_hw *hw)
{
	u16 ctst;
	u8 t8, mac_cfg, pmd_type, phy_type;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_pci_clear(hw);

	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

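	/*
	 * Identify the chip, PHY and media from the EEPROM-loaded
	 * registers.  A PMD type of 'T' or '1' is treated as copper
	 * (twisted pair); other codes such as 'S' or 'L' are fiber.
	 */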
	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (phy_type) {
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
			       pci_name(hw->pdev), phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
		       pci_name(hw->pdev), hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapter's RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

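	/*
	 * Start with only hardware-error and external-register interrupts
	 * unmasked; the per-port receive/transmit interrupt bits are added
	 * to intr_mask when the individual ports are brought up.
	 */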
	hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* avoid boards with stuck hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

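	/*
	 * Program the default SK_RI_TO_53 timeout value into every RAM
	 * interface read/write timeout register (receive, sync and async
	 * transmit queues on both ports).
	 */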
	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	if (hw->chip_id != CHIP_ID_GENESIS)
		skge_write8(hw, GMAC_IRQ_MSK, 0);

	spin_lock_bh(&hw->phy_lock);
	for (i = 0; i < hw->ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}
	spin_unlock_bh(&hw->phy_lock);

	return 0;
}

/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		printk(KERN_ERR "skge etherdev alloc failed\n");
		return NULL;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->open = skge_up;
	dev->stop = skge_down;
	dev->hard_start_xmit = skge_xmit_frame;
	dev->get_stats = skge_get_stats;
	if (hw->chip_id == CHIP_ID_GENESIS)
		dev->set_multicast_list = genesis_set_multicast;
	else
		dev->set_multicast_list = yukon_set_multicast;

	dev->set_mac_address = skge_set_mac_address;
	dev->change_mtu = skge_change_mtu;
	SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
	dev->tx_timeout = skge_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->poll = skge_poll;
	dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = skge_netpoll;
#endif
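	/*
	 * NETIF_F_LLTX: the driver does its own transmit locking with
	 * skge->tx_lock, so the networking core does not take the xmit
	 * lock around hard_start_xmit.  NETIF_F_HIGHDMA is only advertised
	 * when the 64-bit DMA mask was accepted in skge_probe().
	 */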
	dev->irq = hw->pdev->irq;
	dev->features = NETIF_F_LLTX;
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);
	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYMMETRIC;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	hw->dev[port] = dev;

	skge->port = port;

	spin_lock_init(&skge->tx_lock);

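	/*
	 * Receive checksum offload, transmit checksum and scatter/gather
	 * are only enabled on Yukon family chips; Genesis boards keep the
	 * default feature set.
	 */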
	if (hw->chip_id != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		skge->rx_csum = 1;
	}

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}

static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_probe(skge))
		printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}

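/*
 * PCI probe: enable and map the device, choose a DMA mask (preferring
 * 64-bit), reset the hardware, then allocate and register one network
 * device per port.  Everything is unwound through the err_out_* labels
 * on failure.
 */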
static int __devinit skge_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

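	/*
	 * Prefer 64-bit DMA addressing and fall back to 32-bit; using_dac
	 * records the outcome so skge_devinit() can advertise
	 * NETIF_F_HIGHDMA for the ports.
	 */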
	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
		using_dac = 1;
	else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	hw = kmalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}

	memset(hw, 0, sizeof(*hw));
	hw->pdev = pdev;
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		goto err_out_free_hw;
	}

	if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
		       pci_name(pdev), pdev->irq);
		goto err_out_iounmap;
	}
	pci_set_drvdata(pdev, hw);

	err = skge_reset(hw);
	if (err)
		goto err_out_free_irq;

	printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
	       pci_resource_start(pdev, 0), pdev->irq,
	       skge_board_name(hw), hw->chip_rev);

	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) {
		err = -ENOMEM;
		goto err_out_led_off;
	}

	if ((err = register_netdev(dev))) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_free_netdev;
	}

	skge_show_addr(dev);

	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
		if (register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			printk(KERN_WARNING PFX "register of second port failed\n");
			hw->dev[1] = NULL;
			free_netdev(dev1);
		}
	}

	return 0;

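/* error unwind: release resources in the reverse order they were acquired */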
err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_free_irq:
	free_irq(pdev->irq, hw);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}

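/*
 * Device removal: unregister both ports first so no new traffic can
 * start, then kill the extirq tasklet, release the IRQ and PCI
 * resources, and finally free the net_device and hardware structures.
 */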
static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	if ((dev1 = hw->dev[1]))
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_kill(&hw->ext_tasklet);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);
	skge_write16(hw, B0_LED, LED_STAT_OFF);
	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}

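/*
 * Power management: suspend stops any running port, remembers whether
 * Wake-on-LAN was requested, and puts the device into the target sleep
 * state; resume restores PCI state, re-runs skge_reset() and brings the
 * ports back up.
 */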
#ifdef CONFIG_PM
static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];

		if (dev) {
			struct skge_port *skge = netdev_priv(dev);
			if (netif_running(dev)) {
				netif_carrier_off(dev);
				skge_down(dev);
			}
			netif_device_detach(dev);
			wol |= skge->wol;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int skge_resume(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	skge_reset(hw);

	for (i = 0; i < 2; i++) {
		struct net_device *dev = hw->dev[i];
		if (dev) {
			netif_device_attach(dev);
			if (netif_running(dev))
				skge_up(dev);
		}
	}
	return 0;
}
#endif

static struct pci_driver skge_driver = {
	.name =		DRV_NAME,
	.id_table =	skge_id_table,
	.probe =	skge_probe,
	.remove =	__devexit_p(skge_remove),
#ifdef CONFIG_PM
	.suspend =	skge_suspend,
	.resume =	skge_resume,
#endif
};

static int __init skge_init_module(void)
{
	return pci_module_init(&skge_driver);
}

static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
}

module_init(skge_init_module);
module_exit(skge_cleanup_module);