/*
 * meth.c -- O2 Builtin 10/100 Ethernet driver
 *
 * Copyright (C) 2001-2003 Ilya Volynets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/sched.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>	/* mark_bh */

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/device.h>	/* struct device, et al */
#include <linux/netdevice.h>	/* struct net_device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/ip.h>		/* struct iphdr */
#include <linux/tcp.h>		/* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h>		/* MII definitions */

#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>

#include <asm/io.h>
#include <asm/checksum.h>
#include <asm/scatterlist.h>
#include <linux/dma-mapping.h>

#include "meth.h"
#ifndef MFE_DEBUG
#define MFE_DEBUG 0
#endif

#if MFE_DEBUG >= 1
#define DPRINTK(str, args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__, ## args)
#define MFE_RX_DEBUG 2
#else
#define DPRINTK(str, args...)
#define MFE_RX_DEBUG 0
#endif


static const char *meth_str = "SGI O2 Fast Ethernet";
MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
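/* The header above places this file under the GPL; a minimal addition,
 * assuming no license declaration exists elsewhere for this module:
 * stating it here keeps the module loader from tainting the kernel. */
MODULE_LICENSE("GPL");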

#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)

#ifdef HAVE_TX_TIMEOUT
static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
#endif

/*
 * This structure is private to each device. It is used to pass
 * packets in and out, so there is a place for a packet.
 */
struct meth_private {
	struct net_device_stats stats;
	/* in-memory copy of MAC Control register */
	unsigned long mac_ctrl;
	/* in-memory copy of DMA Control register */
	unsigned long dma_ctrl;
	/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
	unsigned long phy_addr;
	tx_packet *tx_ring;
	dma_addr_t tx_ring_dma;
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
	dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
	unsigned long tx_read, tx_write, tx_count;

	rx_packet *rx_ring[RX_RING_ENTRIES];
	dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
	unsigned long rx_write;

	spinlock_t meth_lock;
};

static void meth_tx_timeout(struct net_device *dev);
static irqreturn_t meth_interrupt(int irq, void *dev_id);

/* global, initialized in ip32-setup.c */
char o2meth_eaddr[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

static inline void load_eaddr(struct net_device *dev)
{
	int i;
	DPRINTK("Loading MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
		(int)o2meth_eaddr[0] & 0xFF, (int)o2meth_eaddr[1] & 0xFF,
		(int)o2meth_eaddr[2] & 0xFF, (int)o2meth_eaddr[3] & 0xFF,
		(int)o2meth_eaddr[4] & 0xFF, (int)o2meth_eaddr[5] & 0xFF);
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = o2meth_eaddr[i];
	mace->eth.mac_addr = (*(unsigned long *)o2meth_eaddr) >> 16;
}
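
/* A note on the final write above: o2meth_eaddr is eight bytes with the
 * six-byte station address first, so on the 64-bit big-endian O2 reading
 * it as a single unsigned long puts the MAC in the top 48 bits; shifting
 * right by 16 right-justifies it, which appears to be the layout the
 * MACE mac_addr register expects. */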

/*
 * Waits for BUSY status of mdio bus to clear
 */
#define WAIT_FOR_PHY(___rval) \
	while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \
		udelay(25); \
	}
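
/* The read below is one complete MDIO transaction: wait until the bus is
 * idle, latch the PHY and register numbers into phy_regs, kick off the
 * transfer via phy_trans_go, then poll again until BUSY clears and the
 * result sits in the low bits of phy_data. */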
/* read phy register, return value read */
static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
{
	unsigned long rval;
	WAIT_FOR_PHY(rval);
	mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
	udelay(25);
	mace->eth.phy_trans_go = 1;
	udelay(25);
	WAIT_FOR_PHY(rval);
	return rval & MDIO_DATA_MASK;
}

static int mdio_probe(struct meth_private *priv)
{
	int i;
	unsigned long p2, p3;
	/* check if phy is detected already; phy_addr is unsigned,
	 * so "no PHY" (-1) shows up as a huge value */
	if (priv->phy_addr < 32)
		return 0;
	spin_lock(&priv->meth_lock);
	for (i = 0; i < 32; ++i) {
		priv->phy_addr = i;
		p2 = mdio_read(priv, 2);
		p3 = mdio_read(priv, 3);
#if MFE_DEBUG >= 2
		switch ((p2 << 12) | (p3 >> 4)) {
		case PHY_QS6612X:
			DPRINTK("PHY is QS6612X\n");
			break;
		case PHY_ICS1889:
			DPRINTK("PHY is ICS1889\n");
			break;
		case PHY_ICS1890:
			DPRINTK("PHY is ICS1890\n");
			break;
		case PHY_DP83840:
			DPRINTK("PHY is DP83840\n");
			break;
		}
#endif
		if (p2 != 0xffff && p2 != 0x0000) {
			DPRINTK("PHY code: %lx\n", (p2 << 12) | (p3 >> 4));
			break;
		}
	}
	spin_unlock(&priv->meth_lock);
	/* the loop above breaks early only on success; i == 32 means every
	 * address read back as all-ones or all-zeroes */
	if (i < 32)
		return 0;
	DPRINTK("Oopsie! PHY is not known!\n");
	priv->phy_addr = -1;
	return -ENODEV;
}

static void meth_check_link(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long mii_advertising = mdio_read(priv, 4);
	unsigned long mii_partner = mdio_read(priv, 5);
	unsigned long negotiated = mii_advertising & mii_partner;
	unsigned long duplex, speed;

	if (mii_partner == 0xffff)
		return;

	speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
	duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
		 METH_PHY_FDX : 0;

	if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
		DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
		if (duplex)
			priv->mac_ctrl |= METH_PHY_FDX;
		else
			priv->mac_ctrl &= ~METH_PHY_FDX;
		mace->eth.mac_ctrl = priv->mac_ctrl;
	}

	if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
		DPRINTK("Setting %dMbit/s mode\n", speed ? 100 : 10);
		if (speed)
			priv->mac_ctrl |= METH_100MBIT;
		else
			priv->mac_ctrl &= ~METH_100MBIT;
		mace->eth.mac_ctrl = priv->mac_ctrl;
	}
}
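
/* The magic masks above are standard MII autonegotiation bits: 0x0380
 * covers the 100BASE-TX half/full-duplex and 100BASE-T4 ability bits of
 * the negotiated word, 0x0100 is 100BASE-TX full duplex, and a value of
 * exactly 0x0040 within the 0x01C0 field means the only common duplex
 * capability left is 10BASE-T full duplex. */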


static int meth_init_tx_ring(struct meth_private *priv)
{
	/* Init TX ring */
	priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
					   &priv->tx_ring_dma, GFP_ATOMIC);
	if (!priv->tx_ring)
		return -ENOMEM;
	memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
	priv->tx_count = priv->tx_read = priv->tx_write = 0;
	mace->eth.tx_ring_base = priv->tx_ring_dma;
	/* Now init skb save area */
	memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
	memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
	return 0;
}
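
/* Design note (an inference from the mappings used, not documented in
 * this file): the TX ring lives in coherent DMA memory because the CPU
 * keeps rewriting descriptor headers that the MACE reads back, while RX
 * buffers use streaming dma_map_single()/dma_unmap_single() mappings,
 * since each buffer is handed wholly to the device and reclaimed once. */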

static int meth_init_rx_ring(struct meth_private *priv)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
		if (!priv->rx_skbs[i])
			goto out_free;
		/* 8byte status vector + 3quad padding + 2byte padding,
		 * to put data on 64bit aligned boundary */
		skb_reserve(priv->rx_skbs[i], METH_RX_HEAD);
		priv->rx_ring[i] = (rx_packet *)(priv->rx_skbs[i]->head);
		/* I'll need to re-sync it after each RX */
		priv->rx_ring_dmas[i] =
			dma_map_single(NULL, priv->rx_ring[i],
				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		mace->eth.rx_fifo = priv->rx_ring_dmas[i];
	}
	priv->rx_write = 0;
	return 0;

out_free:
	/* RX DMA is not enabled yet, so the addresses already pushed into
	 * the FIFO are never fetched; just undo our own bookkeeping */
	while (--i >= 0) {
		dma_unmap_single(NULL, priv->rx_ring_dmas[i],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skbs[i]);
		priv->rx_skbs[i] = NULL;
	}
	return -ENOMEM;
}
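
/* Buffer strategy, as implemented above and in meth_rx() below: the
 * driver owns RX_RING_ENTRIES skbs at all times. Each skb's buffer is
 * mapped for DMA and its bus address pushed into the MACE rx_fifo; when
 * a frame lands, the filled skb is sent up the stack and a freshly
 * allocated one takes its slot in the ring. */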
static void meth_free_tx_ring(struct meth_private *priv)
{
	int i;

	/* Remove any pending skb */
	for (i = 0; i < TX_RING_ENTRIES; i++) {
		if (priv->tx_skbs[i])
			dev_kfree_skb(priv->tx_skbs[i]);
		priv->tx_skbs[i] = NULL;
	}
	dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
			  priv->tx_ring_dma);
}

/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
static void meth_free_rx_ring(struct meth_private *priv)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		dma_unmap_single(NULL, priv->rx_ring_dmas[i],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		priv->rx_ring[i] = NULL;
		priv->rx_ring_dmas[i] = 0;
		kfree_skb(priv->rx_skbs[i]);
	}
}

int meth_reset(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;

	/* Reset card */
	mace->eth.mac_ctrl = SGI_MAC_RESET;
	udelay(1);
	mace->eth.mac_ctrl = 0;
	udelay(25);

	/* Load ethernet address */
	load_eaddr(dev);
	/* Should load some "errata", but later */

	/* Check for device */
	if (mdio_probe(priv) < 0) {
		DPRINTK("Unable to find PHY\n");
		return -ENODEV;
	}

	/* Initial mode: 10 | Half-duplex | Accept normal packets */
	priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
	if (dev->flags & IFF_PROMISC)
		priv->mac_ctrl |= METH_PROMISC;
	mace->eth.mac_ctrl = priv->mac_ctrl;

	/* Autonegotiate speed and duplex mode */
	meth_check_link(dev);

	/* Now set dma control, but don't enable DMA, yet */
	priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
			 (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	return 0;
}

/*============End Helper Routines=====================*/

/*
 * Open and close
 */
static int meth_open(struct net_device *dev)
{
	struct meth_private *priv = dev->priv;
	int ret;

	priv->phy_addr = -1; /* No PHY is known yet... */

	/* Initialize the hardware */
	ret = meth_reset(dev);
	if (ret < 0)
		return ret;

	/* Allocate the ring buffers */
	ret = meth_init_tx_ring(priv);
	if (ret < 0)
		return ret;
	ret = meth_init_rx_ring(priv);
	if (ret < 0)
		goto out_free_tx_ring;

	ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
	if (ret) {
		printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
		goto out_free_rx_ring;
	}

	/* Start DMA */
	priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
			  METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	DPRINTK("About to start queue\n");
	netif_start_queue(dev);

	return 0;

out_free_rx_ring:
	meth_free_rx_ring(priv);
out_free_tx_ring:
	meth_free_tx_ring(priv);

	return ret;
}

static int meth_release(struct net_device *dev)
{
	struct meth_private *priv = dev->priv;

	DPRINTK("Stopping queue\n");
	netif_stop_queue(dev); /* can't transmit any more */
	/* shut down DMA */
	priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
			    METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;
	free_irq(dev->irq, dev);
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);

	return 0;
}

/*
 * Receive a packet: retrieve, encapsulate and pass over to upper levels
 */
static void meth_rx(struct net_device *dev, unsigned long int_status)
{
	struct sk_buff *skb;
	unsigned long status;
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;

	spin_lock(&priv->meth_lock);
	priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;
	spin_unlock(&priv->meth_lock);

	if (int_status & METH_INT_RX_UNDERFLOW)
		fifo_rptr = (fifo_rptr - 1) & 0x0f;

	while (priv->rx_write != fifo_rptr) {
		dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		status = priv->rx_ring[priv->rx_write]->status.raw;
#if MFE_DEBUG
		if (!(status & METH_RX_ST_VALID))
			DPRINTK("Not received? status=%016lx\n", status);
#endif
		if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
			int len = (status & 0xffff) - 4; /* omit CRC */
			/* length sanity check */
			if (len < 60 || len > 1518) {
				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#lx.\n",
				       dev->name, len, status);
				priv->stats.rx_errors++;
				priv->stats.rx_length_errors++;
				skb = priv->rx_skbs[priv->rx_write];
			} else {
				skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC | GFP_DMA);
				if (!skb) {
					/* Ouch! No memory! Drop packet on the floor */
					DPRINTK("No mem: dropping packet\n");
					priv->stats.rx_dropped++;
					skb = priv->rx_skbs[priv->rx_write];
				} else {
					struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
					/* 8byte status vector + 3quad padding + 2byte padding,
					 * to put data on 64bit aligned boundary */
					skb_reserve(skb, METH_RX_HEAD);
					/* Write metadata, and then pass to the receive level */
					skb_put(skb_c, len);
					priv->rx_skbs[priv->rx_write] = skb;
					skb_c->dev = dev;
					skb_c->protocol = eth_type_trans(skb_c, dev);
					dev->last_rx = jiffies;
					priv->stats.rx_packets++;
					priv->stats.rx_bytes += len;
					netif_rx(skb_c);
				}
			}
		} else {
			priv->stats.rx_errors++;
			skb = priv->rx_skbs[priv->rx_write];
#if MFE_DEBUG > 0
			printk(KERN_WARNING "meth: RX error: status=0x%016lx\n", status);
			if (status & METH_RX_ST_RCV_CODE_VIOLATION)
				printk(KERN_WARNING "Receive Code Violation\n");
			if (status & METH_RX_ST_CRC_ERR)
				printk(KERN_WARNING "CRC error\n");
			if (status & METH_RX_ST_INV_PREAMBLE_CTX)
				printk(KERN_WARNING "Invalid Preamble Context\n");
			if (status & METH_RX_ST_LONG_EVT_SEEN)
				printk(KERN_WARNING "Long Event Seen...\n");
			if (status & METH_RX_ST_BAD_PACKET)
				printk(KERN_WARNING "Bad Packet\n");
			if (status & METH_RX_ST_CARRIER_EVT_SEEN)
				printk(KERN_WARNING "Carrier Event Seen\n");
#endif
		}
		priv->rx_ring[priv->rx_write] = (rx_packet *)skb->head;
		priv->rx_ring[priv->rx_write]->status.raw = 0;
		priv->rx_ring_dmas[priv->rx_write] =
			dma_map_single(NULL, priv->rx_ring[priv->rx_write],
				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
		ADVANCE_RX_PTR(priv->rx_write);
	}
	spin_lock(&priv->meth_lock);
	/* In case there was underflow, and Rx DMA was disabled */
	priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;
	mace->eth.int_stat = METH_INT_RX_THRESHOLD;
	spin_unlock(&priv->meth_lock);
}
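
/* Note the zero-copy handoff above: rather than copying the received
 * frame out of the DMA buffer, the loop passes the filled skb straight
 * to netif_rx() and replaces it in the ring with the newly allocated
 * one; only when allocation fails is the old skb kept in place and the
 * frame dropped. */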

static int meth_tx_full(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;

	return priv->tx_count >= TX_RING_ENTRIES - 1;
}
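
/* The queue is declared full one entry early; leaving one slot free
 * looks like the classic ring trick that keeps the hardware's read
 * pointer from ever equaling the write pointer on a full ring, which
 * would be indistinguishable from an empty one. */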

static void meth_tx_cleanup(struct net_device *dev, unsigned long int_status)
{
	struct meth_private *priv = dev->priv;
	unsigned long status;
	struct sk_buff *skb;
	unsigned long rptr = (int_status & TX_INFO_RPTR) >> 16;

	spin_lock(&priv->meth_lock);

	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	while (priv->tx_read != rptr) {
		skb = priv->tx_skbs[priv->tx_read];
		status = priv->tx_ring[priv->tx_read].header.raw;
#if MFE_DEBUG >= 1
		if (priv->tx_read == priv->tx_write)
			DPRINTK("Auchi! tx_read=%ld, tx_write=%ld, rptr=%ld?\n",
				priv->tx_read, priv->tx_write, rptr);
#endif
		if (status & METH_TX_ST_DONE) {
			if (status & METH_TX_ST_SUCCESS) {
				priv->stats.tx_packets++;
				priv->stats.tx_bytes += skb->len;
			} else {
				priv->stats.tx_errors++;
#if MFE_DEBUG >= 1
				DPRINTK("TX error: status=%016lx <", status);
				if (status & METH_TX_ST_SUCCESS)
					printk(" SUCCESS");
				if (status & METH_TX_ST_TOOLONG)
					printk(" TOOLONG");
				if (status & METH_TX_ST_UNDERRUN)
					printk(" UNDERRUN");
				if (status & METH_TX_ST_EXCCOLL)
					printk(" EXCCOLL");
				if (status & METH_TX_ST_DEFER)
					printk(" DEFER");
				if (status & METH_TX_ST_LATECOLL)
					printk(" LATECOLL");
				printk(" >\n");
#endif
			}
		} else {
			DPRINTK("RPTR points us here, but packet not done?\n");
			break;
		}
		dev_kfree_skb_irq(skb);
		priv->tx_skbs[priv->tx_read] = NULL;
		priv->tx_ring[priv->tx_read].header.raw = 0;
		priv->tx_read = (priv->tx_read + 1) & (TX_RING_ENTRIES - 1);
		priv->tx_count--;
	}

	/* wake up queue if it was stopped */
	if (netif_queue_stopped(dev) && !meth_tx_full(dev))
		netif_wake_queue(dev);

	mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
	spin_unlock(&priv->meth_lock);
}

static void meth_error(struct net_device *dev, unsigned status)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;

	printk(KERN_WARNING "meth: error status: 0x%08x\n", status);
	/* check for errors too... */
	if (status & (METH_INT_TX_LINK_FAIL))
		printk(KERN_WARNING "meth: link failure\n");
	/* Should I do full reset in this case? */
	if (status & (METH_INT_MEM_ERROR))
		printk(KERN_WARNING "meth: memory error\n");
	if (status & (METH_INT_TX_ABORT))
		printk(KERN_WARNING "meth: aborted\n");
	if (status & (METH_INT_RX_OVERFLOW))
		printk(KERN_WARNING "meth: Rx overflow\n");
	if (status & (METH_INT_RX_UNDERFLOW)) {
		printk(KERN_WARNING "meth: Rx underflow\n");
		spin_lock(&priv->meth_lock);
		mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
		/* more underflow interrupts will be delivered,
		 * effectively throwing us into an infinite loop.
		 * Thus I stop processing Rx in this case. */
		priv->dma_ctrl &= ~METH_DMA_RX_EN;
		mace->eth.dma_ctrl = priv->dma_ctrl;
		DPRINTK("Disabled meth Rx DMA temporarily\n");
		spin_unlock(&priv->meth_lock);
	}
	mace->eth.int_stat = METH_INT_ERROR;
}

/*
 * The typical interrupt entry point
 */
static irqreturn_t meth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long status;

	status = mace->eth.int_stat;
	while (status & 0xff) {
		/* First handle errors - if we get Rx underflow,
		 * Rx DMA will be disabled, and Rx handler will reenable
		 * it. I don't think it's possible to get Rx underflow
		 * without getting Rx interrupt */
		if (status & METH_INT_ERROR)
			meth_error(dev, status);
		if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
			/* a transmission is over: free the skb */
			meth_tx_cleanup(dev, status);
		}
		if (status & METH_INT_RX_THRESHOLD) {
			if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
				break;
			/* send it to meth_rx for handling */
			meth_rx(dev, status);
		}
		status = mace->eth.int_stat;
	}

	return IRQ_HANDLED;
}

/*
 * Prepare a packet that fits into a single TX descriptor (<= 120 bytes)
 */
static void meth_tx_short_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

	desc->header.raw = METH_TX_CMD_INT_EN | (len - 1) | ((128 - len) << 16);
	/* maybe I should set whole thing to 0 first... */
	memcpy(desc->data.dt + (120 - len), skb->data, skb->len);
	if (skb->len < len)
		memset(desc->data.dt + 120 - len + skb->len, 0, len - skb->len);
}
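
/* Descriptor layout implied by the arithmetic above: each 128-byte TX
 * descriptor is an 8-byte header followed by 120 bytes of inline data.
 * Short frames are copied right-justified so they end exactly at the
 * descriptor boundary, and (128 - len) << 16 records the starting
 * offset for the hardware; frames under ETH_ZLEN are zero-padded. */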
#define TX_CATBUF1 BIT(25)
static void meth_tx_1page_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
	int buffer_len = skb->len - unaligned_len;
	dma_addr_t catbuf;

	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);

	/* unaligned part */
	if (unaligned_len) {
		memcpy(desc->data.dt + (120 - unaligned_len),
		       skb->data, unaligned_len);
		desc->header.raw |= (128 - unaligned_len) << 16;
	}

	/* first page */
	catbuf = dma_map_single(NULL, buffer_data, buffer_len,
				DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
	desc->data.cat_buf[0].form.len = buffer_len - 1;
}
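
/* Why the head of the buffer is copied inline: start_addr is stored
 * shifted right by 3, so a concatenated DMA buffer can only begin on an
 * 8-byte boundary. Any bytes before the first such boundary travel in
 * the descriptor's inline data area instead. */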
#define TX_CATBUF2 BIT(26)
static void meth_tx_2page_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
	int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
	int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
	int buffer2_len = skb->len - buffer1_len - unaligned_len;
	dma_addr_t catbuf1, catbuf2;

	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2 | (skb->len - 1);
	/* unaligned part */
	if (unaligned_len) {
		memcpy(desc->data.dt + (120 - unaligned_len),
		       skb->data, unaligned_len);
		desc->header.raw |= (128 - unaligned_len) << 16;
	}

	/* first page */
	catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
				 DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
	desc->data.cat_buf[0].form.len = buffer1_len - 1;
	/* second page */
	catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
				 DMA_TO_DEVICE);
	desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
	desc->data.cat_buf[1].form.len = buffer2_len - 1;
}

static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
{
	/* Remember the skb, so we can free it at interrupt time */
	priv->tx_skbs[priv->tx_write] = skb;
	if (skb->len <= 120) {
		/* Whole packet fits into descriptor */
		meth_tx_short_prepare(priv, skb);
	} else if (PAGE_ALIGN((unsigned long)skb->data) !=
		   PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
		/* Packet crosses page boundary */
		meth_tx_2page_prepare(priv, skb);
	} else {
		/* Packet is in one page */
		meth_tx_1page_prepare(priv, skb);
	}
	priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
	mace->eth.tx_info = priv->tx_write;
	priv->tx_count++;
}
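
/* Presumably the write to tx_info above is what hands the new
 * descriptor to the hardware: it advances the MACE's notion of the
 * software write pointer, so everything between its read pointer and
 * tx_write becomes eligible for transmission. */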

/*
 * Transmit a packet (called by the kernel)
 */
static int meth_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&priv->meth_lock, flags);
	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	meth_add_to_tx_ring(priv, skb);
	dev->trans_start = jiffies; /* save the timestamp */

	/* If TX ring is full, tell the upper layer to stop sending packets */
	if (meth_tx_full(dev)) {
		printk(KERN_DEBUG "TX full: stopping\n");
		netif_stop_queue(dev);
	}

	/* Restart DMA notification */
	priv->dma_ctrl |= METH_DMA_TX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	spin_unlock_irqrestore(&priv->meth_lock, flags);

	return 0;
}

/*
 * Deal with a transmit timeout.
 */
static void meth_tx_timeout(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	unsigned long flags;

	printk(KERN_WARNING "%s: transmit timed out\n", dev->name);

	/* Protect against concurrent rx interrupts */
	spin_lock_irqsave(&priv->meth_lock, flags);

	/* Try to reset the interface. */
	meth_reset(dev);

	priv->stats.tx_errors++;

	/* Clear all rings */
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);
	meth_init_tx_ring(priv);
	meth_init_rx_ring(priv);

	/* Restart dma */
	priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	/* Enable interrupt */
	spin_unlock_irqrestore(&priv->meth_lock, flags);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

/*
 * Ioctl commands
 */
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* XXX Not yet implemented */
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * Return statistics to the caller
 */
static struct net_device_stats *meth_stats(struct net_device *dev)
{
	struct meth_private *priv = (struct meth_private *) dev->priv;
	return &priv->stats;
}

/*
 * The init function.
 */
static struct net_device *meth_init(void)
{
	struct net_device *dev;
	struct meth_private *priv;
	int ret;

	dev = alloc_etherdev(sizeof(struct meth_private));
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->open = meth_open;
	dev->stop = meth_release;
	dev->hard_start_xmit = meth_tx;
	dev->do_ioctl = meth_ioctl;
	dev->get_stats = meth_stats;
#ifdef HAVE_TX_TIMEOUT
	dev->tx_timeout = meth_tx_timeout;
	dev->watchdog_timeo = timeout;
#endif
	dev->irq = MACE_ETHERNET_IRQ;
	dev->base_addr = (unsigned long)&mace->eth;

	priv = (struct meth_private *) dev->priv;
	spin_lock_init(&priv->meth_lock);

	ret = register_netdev(dev);
	if (ret) {
		free_netdev(dev);
		return ERR_PTR(ret);
	}

	printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
	       dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
	/* hand the device back to the caller; meth_init_module() stores it
	 * and meth_exit_module() must be able to unregister and free it */
	return dev;
}

static struct net_device *meth_dev;

static int __init meth_init_module(void)
{
	meth_dev = meth_init();
	if (IS_ERR(meth_dev))
		return PTR_ERR(meth_dev);
	return 0;
}

static void __exit meth_exit_module(void)
{
	unregister_netdev(meth_dev);
	free_netdev(meth_dev);
}

module_init(meth_init_module);
module_exit(meth_exit_module);